drm/amd/pm: fix the return value of pm message
[linux-2.6-block.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "amdgpu_dm_trace.h"
38
39 #include "vid.h"
40 #include "amdgpu.h"
41 #include "amdgpu_display.h"
42 #include "amdgpu_ucode.h"
43 #include "atom.h"
44 #include "amdgpu_dm.h"
45 #ifdef CONFIG_DRM_AMD_DC_HDCP
46 #include "amdgpu_dm_hdcp.h"
47 #include <drm/drm_hdcp.h>
48 #endif
49 #include "amdgpu_pm.h"
50
51 #include "amd_shared.h"
52 #include "amdgpu_dm_irq.h"
53 #include "dm_helpers.h"
54 #include "amdgpu_dm_mst_types.h"
55 #if defined(CONFIG_DEBUG_FS)
56 #include "amdgpu_dm_debugfs.h"
57 #endif
58
59 #include "ivsrcid/ivsrcid_vislands30.h"
60
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87
88 #include "soc15_common.h"
89 #endif
90
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
107
108 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
110
111 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
113
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
116
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
119
120 /**
121  * DOC: overview
122  *
123  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
125  * requests into DC requests, and DC responses into DRM responses.
126  *
127  * The root control structure is &struct amdgpu_display_manager.
128  */
129
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
133
134 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
135 {
136         switch (link->dpcd_caps.dongle_type) {
137         case DISPLAY_DONGLE_NONE:
138                 return DRM_MODE_SUBCONNECTOR_Native;
139         case DISPLAY_DONGLE_DP_VGA_CONVERTER:
140                 return DRM_MODE_SUBCONNECTOR_VGA;
141         case DISPLAY_DONGLE_DP_DVI_CONVERTER:
142         case DISPLAY_DONGLE_DP_DVI_DONGLE:
143                 return DRM_MODE_SUBCONNECTOR_DVID;
144         case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
145         case DISPLAY_DONGLE_DP_HDMI_DONGLE:
146                 return DRM_MODE_SUBCONNECTOR_HDMIA;
147         case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
148         default:
149                 return DRM_MODE_SUBCONNECTOR_Unknown;
150         }
151 }
152
153 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
154 {
155         struct dc_link *link = aconnector->dc_link;
156         struct drm_connector *connector = &aconnector->base;
157         enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
158
159         if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
160                 return;
161
162         if (aconnector->dc_sink)
163                 subconnector = get_subconnector_type(link);
164
165         drm_object_property_set_value(&connector->base,
166                         connector->dev->mode_config.dp_subconnector_property,
167                         subconnector);
168 }
169
170 /*
171  * initializes drm_device display related structures, based on the information
172  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
173  * drm_encoder, drm_mode_config
174  *
175  * Returns 0 on success
176  */
177 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
178 /* removes and deallocates the drm structures, created by the above function */
179 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
180
181 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
182                                 struct drm_plane *plane,
183                                 unsigned long possible_crtcs,
184                                 const struct dc_plane_cap *plane_cap);
185 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
186                                struct drm_plane *plane,
187                                uint32_t link_index);
188 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
189                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
190                                     uint32_t link_index,
191                                     struct amdgpu_encoder *amdgpu_encoder);
192 static int amdgpu_dm_encoder_init(struct drm_device *dev,
193                                   struct amdgpu_encoder *aencoder,
194                                   uint32_t link_index);
195
196 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
197
198 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
199
200 static int amdgpu_dm_atomic_check(struct drm_device *dev,
201                                   struct drm_atomic_state *state);
202
203 static void handle_cursor_update(struct drm_plane *plane,
204                                  struct drm_plane_state *old_plane_state);
205
206 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
207 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
208 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
209 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
211
212 static const struct drm_format_info *
213 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
214
215 /*
216  * dm_vblank_get_counter
217  *
218  * @brief
219  * Get counter for number of vertical blanks
220  *
221  * @param
222  * struct amdgpu_device *adev - [in] desired amdgpu device
223  * int disp_idx - [in] which CRTC to get the counter from
224  *
225  * @return
226  * Counter for vertical blanks
227  */
228 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
229 {
230         if (crtc >= adev->mode_info.num_crtc)
231                 return 0;
232         else {
233                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
234
235                 if (acrtc->dm_irq_params.stream == NULL) {
236                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
237                                   crtc);
238                         return 0;
239                 }
240
241                 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
242         }
243 }
244
245 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
246                                   u32 *vbl, u32 *position)
247 {
248         uint32_t v_blank_start, v_blank_end, h_position, v_position;
249
250         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
251                 return -EINVAL;
252         else {
253                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
254
255                 if (acrtc->dm_irq_params.stream ==  NULL) {
256                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
257                                   crtc);
258                         return 0;
259                 }
260
261                 /*
262                  * TODO rework base driver to use values directly.
263                  * for now parse it back into reg-format
264                  */
265                 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
266                                          &v_blank_start,
267                                          &v_blank_end,
268                                          &h_position,
269                                          &v_position);
270
271                 *position = v_position | (h_position << 16);
272                 *vbl = v_blank_start | (v_blank_end << 16);
273         }
274
275         return 0;
276 }
277
/* amd_ip_funcs .is_idle hook — DM has no meaningful idle state yet. */
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}
283
/* amd_ip_funcs .wait_for_idle hook — nothing to wait for yet. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
289
/* amd_ip_funcs .check_soft_reset hook — DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
	return false;
}
294
/* amd_ip_funcs .soft_reset hook — no reset action implemented. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
300
301 static struct amdgpu_crtc *
302 get_crtc_by_otg_inst(struct amdgpu_device *adev,
303                      int otg_inst)
304 {
305         struct drm_device *dev = adev_to_drm(adev);
306         struct drm_crtc *crtc;
307         struct amdgpu_crtc *amdgpu_crtc;
308
309         if (otg_inst == -1) {
310                 WARN_ON(1);
311                 return adev->mode_info.crtcs[0];
312         }
313
314         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
315                 amdgpu_crtc = to_amdgpu_crtc(crtc);
316
317                 if (amdgpu_crtc->otg_inst == otg_inst)
318                         return amdgpu_crtc;
319         }
320
321         return NULL;
322 }
323
324 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
325 {
326         return acrtc->dm_irq_params.freesync_config.state ==
327                        VRR_STATE_ACTIVE_VARIABLE ||
328                acrtc->dm_irq_params.freesync_config.state ==
329                        VRR_STATE_ACTIVE_FIXED;
330 }
331
332 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
333 {
334         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
335                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
336 }
337
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: pointer to the CRTC's &struct common_irq_params
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed. Depending on whether VRR is
 * active and where scanout currently is, the queued vblank event is
 * either sent immediately or deferred to the vblank handler.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	/* The IRQ source encodes the OTG instance relative to IRQ_TYPE_PFLIP. */
	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	/* event_lock serializes pflip_status/event against the commit path. */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
						 amdgpu_crtc->pflip_status,
						 AMDGPU_FLIP_SUBMITTED,
						 amdgpu_crtc->crtc_id,
						 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	/* A submitted flip should always carry an event; warn if not. */
	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
442
/**
 * dm_vupdate_high_irq() - Handle the VUPDATE interrupt
 * @interrupt_params: pointer to the CRTC's &struct common_irq_params
 *
 * Fires after the end of front-porch. In VRR mode this is where core
 * vblank handling (and BTR processing for pre-DCE12 ASICs) is done,
 * because only here are vblank timestamps valid.
 */
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	/* The IRQ source encodes the OTG instance relative to IRQ_TYPE_VUPDATE. */
	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				/* event_lock protects the shared vrr_params. */
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
487
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
 * event handler. Also performs CRC handling, BTR updates on
 * Vega-and-newer ASICs, and sends pending pageflip events when
 * all planes are disabled (HUBP may be clock-gated in that case).
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	/* The IRQ source encodes the OTG instance relative to IRQ_TYPE_VBLANK. */
	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	/* event_lock protects vrr_params and the pflip event below. */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
568
/* amd_ip_funcs .set_clockgating_state hook — DM has no clockgating control. */
static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}
574
/* amd_ip_funcs .set_powergating_state hook — DM has no powergating control. */
static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}
580
581 /* Prototypes of private functions */
582 static int dm_early_init(void* handle);
583
584 /* Allocate memory for FBC compressed data  */
585 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
586 {
587         struct drm_device *dev = connector->dev;
588         struct amdgpu_device *adev = drm_to_adev(dev);
589         struct dm_compressor_info *compressor = &adev->dm.compressor;
590         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
591         struct drm_display_mode *mode;
592         unsigned long max_size = 0;
593
594         if (adev->dm.dc->fbc_compressor == NULL)
595                 return;
596
597         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
598                 return;
599
600         if (compressor->bo_ptr)
601                 return;
602
603
604         list_for_each_entry(mode, &connector->modes, head) {
605                 if (max_size < mode->htotal * mode->vtotal)
606                         max_size = mode->htotal * mode->vtotal;
607         }
608
609         if (max_size) {
610                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
611                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
612                             &compressor->gpu_addr, &compressor->cpu_addr);
613
614                 if (r)
615                         DRM_ERROR("DM: Failed to initialize FBC\n");
616                 else {
617                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
618                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
619                 }
620
621         }
622
623 }
624
625 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
626                                           int pipe, bool *enabled,
627                                           unsigned char *buf, int max_bytes)
628 {
629         struct drm_device *dev = dev_get_drvdata(kdev);
630         struct amdgpu_device *adev = drm_to_adev(dev);
631         struct drm_connector *connector;
632         struct drm_connector_list_iter conn_iter;
633         struct amdgpu_dm_connector *aconnector;
634         int ret = 0;
635
636         *enabled = false;
637
638         mutex_lock(&adev->dm.audio_lock);
639
640         drm_connector_list_iter_begin(dev, &conn_iter);
641         drm_for_each_connector_iter(connector, &conn_iter) {
642                 aconnector = to_amdgpu_dm_connector(connector);
643                 if (aconnector->audio_inst != port)
644                         continue;
645
646                 *enabled = true;
647                 ret = drm_eld_size(connector->eld);
648                 memcpy(buf, connector->eld, min(max_bytes, ret));
649
650                 break;
651         }
652         drm_connector_list_iter_end(&conn_iter);
653
654         mutex_unlock(&adev->dm.audio_lock);
655
656         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
657
658         return ret;
659 }
660
/* Ops exposed to the HDA driver through the DRM audio component framework. */
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};
664
665 static int amdgpu_dm_audio_component_bind(struct device *kdev,
666                                        struct device *hda_kdev, void *data)
667 {
668         struct drm_device *dev = dev_get_drvdata(kdev);
669         struct amdgpu_device *adev = drm_to_adev(dev);
670         struct drm_audio_component *acomp = data;
671
672         acomp->ops = &amdgpu_dm_audio_component_ops;
673         acomp->dev = kdev;
674         adev->dm.audio_component = acomp;
675
676         return 0;
677 }
678
679 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
680                                           struct device *hda_kdev, void *data)
681 {
682         struct drm_device *dev = dev_get_drvdata(kdev);
683         struct amdgpu_device *adev = drm_to_adev(dev);
684         struct drm_audio_component *acomp = data;
685
686         acomp->ops = NULL;
687         acomp->dev = NULL;
688         adev->dm.audio_component = NULL;
689 }
690
/* Bind/unbind ops registered with the kernel component framework. */
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind   = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};
695
696 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
697 {
698         int i, ret;
699
700         if (!amdgpu_audio)
701                 return 0;
702
703         adev->mode_info.audio.enabled = true;
704
705         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
706
707         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
708                 adev->mode_info.audio.pin[i].channels = -1;
709                 adev->mode_info.audio.pin[i].rate = -1;
710                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
711                 adev->mode_info.audio.pin[i].status_bits = 0;
712                 adev->mode_info.audio.pin[i].category_code = 0;
713                 adev->mode_info.audio.pin[i].connected = false;
714                 adev->mode_info.audio.pin[i].id =
715                         adev->dm.dc->res_pool->audios[i]->inst;
716                 adev->mode_info.audio.pin[i].offset = 0;
717         }
718
719         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
720         if (ret < 0)
721                 return ret;
722
723         adev->dm.audio_registered = true;
724
725         return 0;
726 }
727
728 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
729 {
730         if (!amdgpu_audio)
731                 return;
732
733         if (!adev->mode_info.audio.enabled)
734                 return;
735
736         if (adev->dm.audio_registered) {
737                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
738                 adev->dm.audio_registered = false;
739         }
740
741         /* TODO: Disable audio? */
742
743         adev->mode_info.audio.enabled = false;
744 }
745
746 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
747 {
748         struct drm_audio_component *acomp = adev->dm.audio_component;
749
750         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
751                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
752
753                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
754                                                  pin, -1);
755         }
756 }
757
/**
 * dm_dmub_hw_init() - Copy DMUB firmware/VBIOS into the framebuffer windows
 * and bring up the DMUB hardware service.
 * @adev: amdgpu device
 *
 * Return: 0 on success or when the ASIC has no DMUB support (no-op);
 * negative errno on failure. A timed-out auto-load wait only warns and
 * does not fail the init.
 */
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	/* Instruction/const data lives after the ucode array offset plus the
	 * PSP signing header; BSS data follows the inst/const section. */
	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
885
886 #if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * mmhub_read_system_context() - Derive the DC physical address space config
 * from the GMC aperture and GART settings.
 * @adev: amdgpu device
 * @pa_config: output structure consumed by dc_setup_system_context()
 *
 * System aperture bounds are encoded in 256 KiB units (>> 18), the AGP
 * window in 16 MiB units (>> 24), and GART page table addresses in 4 KiB
 * pages (>> 12), matching the MMHUB register layouts.
 */
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	/* Page table addresses are split into a 4-bit high part (bits 47:44)
	 * and a 32-bit low part of 4 KiB page numbers. */
	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	/* Scale the unit-encoded values back to byte addresses for DC. */
	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;

}
939 #endif
940
941 #ifdef CONFIG_DEBUG_FS
942 static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
943 {
944         dm->crc_win_x_start_property =
945                 drm_property_create_range(adev_to_drm(dm->adev),
946                                           DRM_MODE_PROP_ATOMIC,
947                                           "AMD_CRC_WIN_X_START", 0, U16_MAX);
948         if (!dm->crc_win_x_start_property)
949                 return -ENOMEM;
950
951         dm->crc_win_y_start_property =
952                 drm_property_create_range(adev_to_drm(dm->adev),
953                                           DRM_MODE_PROP_ATOMIC,
954                                           "AMD_CRC_WIN_Y_START", 0, U16_MAX);
955         if (!dm->crc_win_y_start_property)
956                 return -ENOMEM;
957
958         dm->crc_win_x_end_property =
959                 drm_property_create_range(adev_to_drm(dm->adev),
960                                           DRM_MODE_PROP_ATOMIC,
961                                           "AMD_CRC_WIN_X_END", 0, U16_MAX);
962         if (!dm->crc_win_x_end_property)
963                 return -ENOMEM;
964
965         dm->crc_win_y_end_property =
966                 drm_property_create_range(adev_to_drm(dm->adev),
967                                           DRM_MODE_PROP_ATOMIC,
968                                           "AMD_CRC_WIN_Y_END", 0, U16_MAX);
969         if (!dm->crc_win_y_end_property)
970                 return -ENOMEM;
971
972         return 0;
973 }
974 #endif
975
/**
 * amdgpu_dm_init() - Initialize the display manager: IRQs, DC core, DMUB,
 * freesync, HDCP (optional) and the DRM/KMS device state.
 * @adev: amdgpu device
 *
 * On any failure the partially-initialized state is torn down via
 * amdgpu_dm_fini() and -EINVAL is returned.
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* APUs share system memory with the GPU, so enable GPU VM support;
	 * Green Sardine additionally runs without a DMCU. */
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	/* Translate module-parameter feature masks into DC init flags. */
	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	/* Apply debug-mask overrides requested on the module command line. */
	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	/* Freesync failure is non-fatal: variable refresh rate is simply
	 * unavailable. */
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#ifdef CONFIG_DEBUG_FS
	if (create_crtc_crc_properties(&adev->dm))
		DRM_ERROR("amdgpu: failed to create crc property.\n");
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}


	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
1157
1158 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1159 {
1160         int i;
1161
1162         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1163                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1164         }
1165
1166         amdgpu_dm_audio_fini(adev);
1167
1168         amdgpu_dm_destroy_drm_device(&adev->dm);
1169
1170 #ifdef CONFIG_DRM_AMD_DC_HDCP
1171         if (adev->dm.hdcp_workqueue) {
1172                 hdcp_destroy(adev->dm.hdcp_workqueue);
1173                 adev->dm.hdcp_workqueue = NULL;
1174         }
1175
1176         if (adev->dm.dc)
1177                 dc_deinit_callbacks(adev->dm.dc);
1178 #endif
1179         if (adev->dm.dc->ctx->dmub_srv) {
1180                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1181                 adev->dm.dc->ctx->dmub_srv = NULL;
1182         }
1183
1184         if (adev->dm.dmub_bo)
1185                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1186                                       &adev->dm.dmub_bo_gpu_addr,
1187                                       &adev->dm.dmub_bo_cpu_addr);
1188
1189         /* DC Destroy TODO: Replace destroy DAL */
1190         if (adev->dm.dc)
1191                 dc_destroy(&adev->dm.dc);
1192         /*
1193          * TODO: pageflip, vlank interrupt
1194          *
1195          * amdgpu_dm_irq_fini(adev);
1196          */
1197
1198         if (adev->dm.cgs_device) {
1199                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1200                 adev->dm.cgs_device = NULL;
1201         }
1202         if (adev->dm.freesync_module) {
1203                 mod_freesync_destroy(adev->dm.freesync_module);
1204                 adev->dm.freesync_module = NULL;
1205         }
1206
1207         mutex_destroy(&adev->dm.audio_lock);
1208         mutex_destroy(&adev->dm.dc_lock);
1209
1210         return;
1211 }
1212
1213 static int load_dmcu_fw(struct amdgpu_device *adev)
1214 {
1215         const char *fw_name_dmcu = NULL;
1216         int r;
1217         const struct dmcu_firmware_header_v1_0 *hdr;
1218
1219         switch(adev->asic_type) {
1220 #if defined(CONFIG_DRM_AMD_DC_SI)
1221         case CHIP_TAHITI:
1222         case CHIP_PITCAIRN:
1223         case CHIP_VERDE:
1224         case CHIP_OLAND:
1225 #endif
1226         case CHIP_BONAIRE:
1227         case CHIP_HAWAII:
1228         case CHIP_KAVERI:
1229         case CHIP_KABINI:
1230         case CHIP_MULLINS:
1231         case CHIP_TONGA:
1232         case CHIP_FIJI:
1233         case CHIP_CARRIZO:
1234         case CHIP_STONEY:
1235         case CHIP_POLARIS11:
1236         case CHIP_POLARIS10:
1237         case CHIP_POLARIS12:
1238         case CHIP_VEGAM:
1239         case CHIP_VEGA10:
1240         case CHIP_VEGA12:
1241         case CHIP_VEGA20:
1242         case CHIP_NAVI10:
1243         case CHIP_NAVI14:
1244         case CHIP_RENOIR:
1245         case CHIP_SIENNA_CICHLID:
1246         case CHIP_NAVY_FLOUNDER:
1247         case CHIP_DIMGREY_CAVEFISH:
1248         case CHIP_VANGOGH:
1249                 return 0;
1250         case CHIP_NAVI12:
1251                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1252                 break;
1253         case CHIP_RAVEN:
1254                 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1255                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1256                 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1257                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1258                 else
1259                         return 0;
1260                 break;
1261         default:
1262                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1263                 return -EINVAL;
1264         }
1265
1266         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1267                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1268                 return 0;
1269         }
1270
1271         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1272         if (r == -ENOENT) {
1273                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1274                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1275                 adev->dm.fw_dmcu = NULL;
1276                 return 0;
1277         }
1278         if (r) {
1279                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1280                         fw_name_dmcu);
1281                 return r;
1282         }
1283
1284         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1285         if (r) {
1286                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1287                         fw_name_dmcu);
1288                 release_firmware(adev->dm.fw_dmcu);
1289                 adev->dm.fw_dmcu = NULL;
1290                 return r;
1291         }
1292
1293         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1294         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1295         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1296         adev->firmware.fw_size +=
1297                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1298
1299         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1300         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1301         adev->firmware.fw_size +=
1302                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1303
1304         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1305
1306         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1307
1308         return 0;
1309 }
1310
1311 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1312 {
1313         struct amdgpu_device *adev = ctx;
1314
1315         return dm_read_reg(adev->dm.dc->ctx, address);
1316 }
1317
1318 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1319                                      uint32_t value)
1320 {
1321         struct amdgpu_device *adev = ctx;
1322
1323         return dm_write_reg(adev->dm.dc->ctx, address, value);
1324 }
1325
1326 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1327 {
1328         struct dmub_srv_create_params create_params;
1329         struct dmub_srv_region_params region_params;
1330         struct dmub_srv_region_info region_info;
1331         struct dmub_srv_fb_params fb_params;
1332         struct dmub_srv_fb_info *fb_info;
1333         struct dmub_srv *dmub_srv;
1334         const struct dmcub_firmware_header_v1_0 *hdr;
1335         const char *fw_name_dmub;
1336         enum dmub_asic dmub_asic;
1337         enum dmub_status status;
1338         int r;
1339
1340         switch (adev->asic_type) {
1341         case CHIP_RENOIR:
1342                 dmub_asic = DMUB_ASIC_DCN21;
1343                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1344                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1345                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1346                 break;
1347         case CHIP_SIENNA_CICHLID:
1348                 dmub_asic = DMUB_ASIC_DCN30;
1349                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1350                 break;
1351         case CHIP_NAVY_FLOUNDER:
1352                 dmub_asic = DMUB_ASIC_DCN30;
1353                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1354                 break;
1355         case CHIP_VANGOGH:
1356                 dmub_asic = DMUB_ASIC_DCN301;
1357                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1358                 break;
1359         case CHIP_DIMGREY_CAVEFISH:
1360                 dmub_asic = DMUB_ASIC_DCN302;
1361                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1362                 break;
1363
1364         default:
1365                 /* ASIC doesn't support DMUB. */
1366                 return 0;
1367         }
1368
1369         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1370         if (r) {
1371                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1372                 return 0;
1373         }
1374
1375         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1376         if (r) {
1377                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1378                 return 0;
1379         }
1380
1381         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1382
1383         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1384                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1385                         AMDGPU_UCODE_ID_DMCUB;
1386                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1387                         adev->dm.dmub_fw;
1388                 adev->firmware.fw_size +=
1389                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1390
1391                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1392                          adev->dm.dmcub_fw_version);
1393         }
1394
1395         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1396
1397         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1398         dmub_srv = adev->dm.dmub_srv;
1399
1400         if (!dmub_srv) {
1401                 DRM_ERROR("Failed to allocate DMUB service!\n");
1402                 return -ENOMEM;
1403         }
1404
1405         memset(&create_params, 0, sizeof(create_params));
1406         create_params.user_ctx = adev;
1407         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1408         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1409         create_params.asic = dmub_asic;
1410
1411         /* Create the DMUB service. */
1412         status = dmub_srv_create(dmub_srv, &create_params);
1413         if (status != DMUB_STATUS_OK) {
1414                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1415                 return -EINVAL;
1416         }
1417
1418         /* Calculate the size of all the regions for the DMUB service. */
1419         memset(&region_params, 0, sizeof(region_params));
1420
1421         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1422                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1423         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1424         region_params.vbios_size = adev->bios_size;
1425         region_params.fw_bss_data = region_params.bss_data_size ?
1426                 adev->dm.dmub_fw->data +
1427                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1428                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1429         region_params.fw_inst_const =
1430                 adev->dm.dmub_fw->data +
1431                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1432                 PSP_HEADER_BYTES;
1433
1434         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1435                                            &region_info);
1436
1437         if (status != DMUB_STATUS_OK) {
1438                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1439                 return -EINVAL;
1440         }
1441
1442         /*
1443          * Allocate a framebuffer based on the total size of all the regions.
1444          * TODO: Move this into GART.
1445          */
1446         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1447                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1448                                     &adev->dm.dmub_bo_gpu_addr,
1449                                     &adev->dm.dmub_bo_cpu_addr);
1450         if (r)
1451                 return r;
1452
1453         /* Rebase the regions on the framebuffer address. */
1454         memset(&fb_params, 0, sizeof(fb_params));
1455         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1456         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1457         fb_params.region_info = &region_info;
1458
1459         adev->dm.dmub_fb_info =
1460                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1461         fb_info = adev->dm.dmub_fb_info;
1462
1463         if (!fb_info) {
1464                 DRM_ERROR(
1465                         "Failed to allocate framebuffer info for DMUB service!\n");
1466                 return -ENOMEM;
1467         }
1468
1469         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1470         if (status != DMUB_STATUS_OK) {
1471                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1472                 return -EINVAL;
1473         }
1474
1475         return 0;
1476 }
1477
/* IP-block sw_init hook: set up DMUB software state, then the DMCU fw. */
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = dm_dmub_sw_init(adev);

	if (ret)
		return ret;

	return load_dmcu_fw(adev);
}
1489
1490 static int dm_sw_fini(void *handle)
1491 {
1492         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1493
1494         kfree(adev->dm.dmub_fb_info);
1495         adev->dm.dmub_fb_info = NULL;
1496
1497         if (adev->dm.dmub_srv) {
1498                 dmub_srv_destroy(adev->dm.dmub_srv);
1499                 adev->dm.dmub_srv = NULL;
1500         }
1501
1502         release_firmware(adev->dm.dmub_fw);
1503         adev->dm.dmub_fw = NULL;
1504
1505         release_firmware(adev->dm.fw_dmcu);
1506         adev->dm.fw_dmcu = NULL;
1507
1508         return 0;
1509 }
1510
1511 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1512 {
1513         struct amdgpu_dm_connector *aconnector;
1514         struct drm_connector *connector;
1515         struct drm_connector_list_iter iter;
1516         int ret = 0;
1517
1518         drm_connector_list_iter_begin(dev, &iter);
1519         drm_for_each_connector_iter(connector, &iter) {
1520                 aconnector = to_amdgpu_dm_connector(connector);
1521                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1522                     aconnector->mst_mgr.aux) {
1523                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1524                                          aconnector,
1525                                          aconnector->base.base.id);
1526
1527                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1528                         if (ret < 0) {
1529                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1530                                 aconnector->dc_link->type =
1531                                         dc_connection_single;
1532                                 break;
1533                         }
1534                 }
1535         }
1536         drm_connector_list_iter_end(&iter);
1537
1538         return ret;
1539 }
1540
1541 static int dm_late_init(void *handle)
1542 {
1543         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1544
1545         struct dmcu_iram_parameters params;
1546         unsigned int linear_lut[16];
1547         int i;
1548         struct dmcu *dmcu = NULL;
1549         bool ret = true;
1550
1551         dmcu = adev->dm.dc->res_pool->dmcu;
1552
1553         for (i = 0; i < 16; i++)
1554                 linear_lut[i] = 0xFFFF * i / 15;
1555
1556         params.set = 0;
1557         params.backlight_ramping_start = 0xCCCC;
1558         params.backlight_ramping_reduction = 0xCCCCCCCC;
1559         params.backlight_lut_array_size = 16;
1560         params.backlight_lut_array = linear_lut;
1561
1562         /* Min backlight level after ABM reduction,  Don't allow below 1%
1563          * 0xFFFF x 0.01 = 0x28F
1564          */
1565         params.min_abm_backlight = 0x28F;
1566
1567         /* In the case where abm is implemented on dmcub,
1568          * dmcu object will be null.
1569          * ABM 2.4 and up are implemented on dmcub.
1570          */
1571         if (dmcu)
1572                 ret = dmcu_load_iram(dmcu, params);
1573         else if (adev->dm.dc->ctx->dmub_srv)
1574                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1575
1576         if (!ret)
1577                 return -EINVAL;
1578
1579         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1580 }
1581
1582 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1583 {
1584         struct amdgpu_dm_connector *aconnector;
1585         struct drm_connector *connector;
1586         struct drm_connector_list_iter iter;
1587         struct drm_dp_mst_topology_mgr *mgr;
1588         int ret;
1589         bool need_hotplug = false;
1590
1591         drm_connector_list_iter_begin(dev, &iter);
1592         drm_for_each_connector_iter(connector, &iter) {
1593                 aconnector = to_amdgpu_dm_connector(connector);
1594                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1595                     aconnector->mst_port)
1596                         continue;
1597
1598                 mgr = &aconnector->mst_mgr;
1599
1600                 if (suspend) {
1601                         drm_dp_mst_topology_mgr_suspend(mgr);
1602                 } else {
1603                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1604                         if (ret < 0) {
1605                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1606                                 need_hotplug = true;
1607                         }
1608                 }
1609         }
1610         drm_connector_list_iter_end(&iter);
1611
1612         if (need_hotplug)
1613                 drm_kms_helper_hotplug_event(dev);
1614 }
1615
1616 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1617 {
1618         struct smu_context *smu = &adev->smu;
1619         int ret = 0;
1620
1621         if (!is_support_sw_smu(adev))
1622                 return 0;
1623
1624         /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1625          * on window driver dc implementation.
1626          * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1627          * should be passed to smu during boot up and resume from s3.
1628          * boot up: dc calculate dcn watermark clock settings within dc_create,
1629          * dcn20_resource_construct
1630          * then call pplib functions below to pass the settings to smu:
1631          * smu_set_watermarks_for_clock_ranges
1632          * smu_set_watermarks_table
1633          * navi10_set_watermarks_table
1634          * smu_write_watermarks_table
1635          *
1636          * For Renoir, clock settings of dcn watermark are also fixed values.
1637          * dc has implemented different flow for window driver:
1638          * dc_hardware_init / dc_set_power_state
1639          * dcn10_init_hw
1640          * notify_wm_ranges
1641          * set_wm_ranges
1642          * -- Linux
1643          * smu_set_watermarks_for_clock_ranges
1644          * renoir_set_watermarks_table
1645          * smu_write_watermarks_table
1646          *
1647          * For Linux,
1648          * dc_hardware_init -> amdgpu_dm_init
1649          * dc_set_power_state --> dm_resume
1650          *
1651          * therefore, this function apply to navi10/12/14 but not Renoir
1652          * *
1653          */
1654         switch(adev->asic_type) {
1655         case CHIP_NAVI10:
1656         case CHIP_NAVI14:
1657         case CHIP_NAVI12:
1658                 break;
1659         default:
1660                 return 0;
1661         }
1662
1663         ret = smu_write_watermarks_table(smu);
1664         if (ret) {
1665                 DRM_ERROR("Failed to update WMTABLE!\n");
1666                 return ret;
1667         }
1668
1669         return 0;
1670 }
1671
1672 /**
1673  * dm_hw_init() - Initialize DC device
1674  * @handle: The base driver device containing the amdgpu_dm device.
1675  *
1676  * Initialize the &struct amdgpu_display_manager device. This involves calling
1677  * the initializers of each DM component, then populating the struct with them.
1678  *
1679  * Although the function implies hardware initialization, both hardware and
1680  * software are initialized here. Splitting them out to their relevant init
1681  * hooks is a future TODO item.
1682  *
1683  * Some notable things that are initialized here:
1684  *
1685  * - Display Core, both software and hardware
1686  * - DC modules that we need (freesync and color management)
1687  * - DRM software states
1688  * - Interrupt sources and handlers
1689  * - Vblank support
1690  * - Debug FS entries, if enabled
1691  */
1692 static int dm_hw_init(void *handle)
1693 {
1694         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1695         /* Create DAL display manager */
1696         amdgpu_dm_init(adev);
1697         amdgpu_dm_hpd_init(adev);
1698
1699         return 0;
1700 }
1701
1702 /**
1703  * dm_hw_fini() - Teardown DC device
1704  * @handle: The base driver device containing the amdgpu_dm device.
1705  *
1706  * Teardown components within &struct amdgpu_display_manager that require
1707  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1708  * were loaded. Also flush IRQ workqueues and disable them.
1709  */
1710 static int dm_hw_fini(void *handle)
1711 {
1712         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1713
1714         amdgpu_dm_hpd_fini(adev);
1715
1716         amdgpu_dm_irq_fini(adev);
1717         amdgpu_dm_fini(adev);
1718         return 0;
1719 }
1720
1721
1722 static int dm_enable_vblank(struct drm_crtc *crtc);
1723 static void dm_disable_vblank(struct drm_crtc *crtc);
1724
1725 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1726                                  struct dc_state *state, bool enable)
1727 {
1728         enum dc_irq_source irq_source;
1729         struct amdgpu_crtc *acrtc;
1730         int rc = -EBUSY;
1731         int i = 0;
1732
1733         for (i = 0; i < state->stream_count; i++) {
1734                 acrtc = get_crtc_by_otg_inst(
1735                                 adev, state->stream_status[i].primary_otg_inst);
1736
1737                 if (acrtc && state->stream_status[i].plane_count != 0) {
1738                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1739                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1740                         DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1741                                   acrtc->crtc_id, enable ? "en" : "dis", rc);
1742                         if (rc)
1743                                 DRM_WARN("Failed to %s pflip interrupts\n",
1744                                          enable ? "enable" : "disable");
1745
1746                         if (enable) {
1747                                 rc = dm_enable_vblank(&acrtc->base);
1748                                 if (rc)
1749                                         DRM_WARN("Failed to enable vblank interrupts\n");
1750                         } else {
1751                                 dm_disable_vblank(&acrtc->base);
1752                         }
1753
1754                 }
1755         }
1756
1757 }
1758
/*
 * amdgpu_dm_commit_zero_streams() - Commit a DC state containing no streams.
 * @dc: display core instance
 *
 * Copies the current DC state, removes every stream in it (detaching all
 * planes first), validates the resulting empty state and commits it. Used
 * on the GPU-reset suspend path to quiesce the display hardware.
 *
 * Return: DC_OK on success, a dc_status error code otherwise
 * (DC_ERROR_UNEXPECTED if the state copy could not be allocated).
 */
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}


	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}
1810
1811 static int dm_suspend(void *handle)
1812 {
1813         struct amdgpu_device *adev = handle;
1814         struct amdgpu_display_manager *dm = &adev->dm;
1815         int ret = 0;
1816
1817         if (amdgpu_in_reset(adev)) {
1818                 mutex_lock(&dm->dc_lock);
1819                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1820
1821                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1822
1823                 amdgpu_dm_commit_zero_streams(dm->dc);
1824
1825                 amdgpu_dm_irq_suspend(adev);
1826
1827                 return ret;
1828         }
1829
1830         WARN_ON(adev->dm.cached_state);
1831         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1832
1833         s3_handle_mst(adev_to_drm(adev), true);
1834
1835         amdgpu_dm_irq_suspend(adev);
1836
1837
1838         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1839
1840         return 0;
1841 }
1842
1843 static struct amdgpu_dm_connector *
1844 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1845                                              struct drm_crtc *crtc)
1846 {
1847         uint32_t i;
1848         struct drm_connector_state *new_con_state;
1849         struct drm_connector *connector;
1850         struct drm_crtc *crtc_from_state;
1851
1852         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1853                 crtc_from_state = new_con_state->crtc;
1854
1855                 if (crtc_from_state == crtc)
1856                         return to_amdgpu_dm_connector(connector);
1857         }
1858
1859         return NULL;
1860 }
1861
/*
 * emulated_link_detect() - Fake a link detection for a forced connector.
 * @link: DC link to attach an emulated sink to
 *
 * Marks the link disconnected, then creates a new dc_sink matching the
 * connector's signal type and reads the local EDID into it. Used when
 * userspace forces a connector on but no physical sink responded.
 */
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	/* NOTE(review): prev_sink is retained here but never used or released
	 * in this function — looks like a leaked reference; verify. */
	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		/* NOTE(review): DP sinks are emulated as SIGNAL_TYPE_VIRTUAL,
		 * presumably to bypass real DP link handling — confirm. */
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}
1943
1944 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1945                                      struct amdgpu_display_manager *dm)
1946 {
1947         struct {
1948                 struct dc_surface_update surface_updates[MAX_SURFACES];
1949                 struct dc_plane_info plane_infos[MAX_SURFACES];
1950                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1951                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1952                 struct dc_stream_update stream_update;
1953         } * bundle;
1954         int k, m;
1955
1956         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1957
1958         if (!bundle) {
1959                 dm_error("Failed to allocate update bundle\n");
1960                 goto cleanup;
1961         }
1962
1963         for (k = 0; k < dc_state->stream_count; k++) {
1964                 bundle->stream_update.stream = dc_state->streams[k];
1965
1966                 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1967                         bundle->surface_updates[m].surface =
1968                                 dc_state->stream_status->plane_states[m];
1969                         bundle->surface_updates[m].surface->force_full_update =
1970                                 true;
1971                 }
1972                 dc_commit_updates_for_stream(
1973                         dm->dc, bundle->surface_updates,
1974                         dc_state->stream_status->plane_count,
1975                         dc_state->streams[k], &bundle->stream_update, dc_state);
1976         }
1977
1978 cleanup:
1979         kfree(bundle);
1980
1981         return;
1982 }
1983
1984 static void dm_set_dpms_off(struct dc_link *link)
1985 {
1986         struct dc_stream_state *stream_state;
1987         struct amdgpu_dm_connector *aconnector = link->priv;
1988         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1989         struct dc_stream_update stream_update;
1990         bool dpms_off = true;
1991
1992         memset(&stream_update, 0, sizeof(stream_update));
1993         stream_update.dpms_off = &dpms_off;
1994
1995         mutex_lock(&adev->dm.dc_lock);
1996         stream_state = dc_stream_find_from_link(link);
1997
1998         if (stream_state == NULL) {
1999                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2000                 mutex_unlock(&adev->dm.dc_lock);
2001                 return;
2002         }
2003
2004         stream_update.stream = stream_state;
2005         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2006                                      stream_state, &stream_update,
2007                                      stream_state->ctx->dc->current_state);
2008         mutex_unlock(&adev->dm.dc_lock);
2009 }
2010
2011 static int dm_resume(void *handle)
2012 {
2013         struct amdgpu_device *adev = handle;
2014         struct drm_device *ddev = adev_to_drm(adev);
2015         struct amdgpu_display_manager *dm = &adev->dm;
2016         struct amdgpu_dm_connector *aconnector;
2017         struct drm_connector *connector;
2018         struct drm_connector_list_iter iter;
2019         struct drm_crtc *crtc;
2020         struct drm_crtc_state *new_crtc_state;
2021         struct dm_crtc_state *dm_new_crtc_state;
2022         struct drm_plane *plane;
2023         struct drm_plane_state *new_plane_state;
2024         struct dm_plane_state *dm_new_plane_state;
2025         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2026         enum dc_connection_type new_connection_type = dc_connection_none;
2027         struct dc_state *dc_state;
2028         int i, r, j;
2029
2030         if (amdgpu_in_reset(adev)) {
2031                 dc_state = dm->cached_dc_state;
2032
2033                 r = dm_dmub_hw_init(adev);
2034                 if (r)
2035                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2036
2037                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2038                 dc_resume(dm->dc);
2039
2040                 amdgpu_dm_irq_resume_early(adev);
2041
2042                 for (i = 0; i < dc_state->stream_count; i++) {
2043                         dc_state->streams[i]->mode_changed = true;
2044                         for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2045                                 dc_state->stream_status->plane_states[j]->update_flags.raw
2046                                         = 0xffffffff;
2047                         }
2048                 }
2049
2050                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2051
2052                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2053
2054                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2055
2056                 dc_release_state(dm->cached_dc_state);
2057                 dm->cached_dc_state = NULL;
2058
2059                 amdgpu_dm_irq_resume_late(adev);
2060
2061                 mutex_unlock(&dm->dc_lock);
2062
2063                 return 0;
2064         }
2065         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2066         dc_release_state(dm_state->context);
2067         dm_state->context = dc_create_state(dm->dc);
2068         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2069         dc_resource_state_construct(dm->dc, dm_state->context);
2070
2071         /* Before powering on DC we need to re-initialize DMUB. */
2072         r = dm_dmub_hw_init(adev);
2073         if (r)
2074                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2075
2076         /* power on hardware */
2077         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2078
2079         /* program HPD filter */
2080         dc_resume(dm->dc);
2081
2082         /*
2083          * early enable HPD Rx IRQ, should be done before set mode as short
2084          * pulse interrupts are used for MST
2085          */
2086         amdgpu_dm_irq_resume_early(adev);
2087
2088         /* On resume we need to rewrite the MSTM control bits to enable MST*/
2089         s3_handle_mst(ddev, false);
2090
2091         /* Do detection*/
2092         drm_connector_list_iter_begin(ddev, &iter);
2093         drm_for_each_connector_iter(connector, &iter) {
2094                 aconnector = to_amdgpu_dm_connector(connector);
2095
2096                 /*
2097                  * this is the case when traversing through already created
2098                  * MST connectors, should be skipped
2099                  */
2100                 if (aconnector->mst_port)
2101                         continue;
2102
2103                 mutex_lock(&aconnector->hpd_lock);
2104                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2105                         DRM_ERROR("KMS: Failed to detect connector\n");
2106
2107                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2108                         emulated_link_detect(aconnector->dc_link);
2109                 else
2110                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2111
2112                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2113                         aconnector->fake_enable = false;
2114
2115                 if (aconnector->dc_sink)
2116                         dc_sink_release(aconnector->dc_sink);
2117                 aconnector->dc_sink = NULL;
2118                 amdgpu_dm_update_connector_after_detect(aconnector);
2119                 mutex_unlock(&aconnector->hpd_lock);
2120         }
2121         drm_connector_list_iter_end(&iter);
2122
2123         /* Force mode set in atomic commit */
2124         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2125                 new_crtc_state->active_changed = true;
2126
2127         /*
2128          * atomic_check is expected to create the dc states. We need to release
2129          * them here, since they were duplicated as part of the suspend
2130          * procedure.
2131          */
2132         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2133                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2134                 if (dm_new_crtc_state->stream) {
2135                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2136                         dc_stream_release(dm_new_crtc_state->stream);
2137                         dm_new_crtc_state->stream = NULL;
2138                 }
2139         }
2140
2141         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2142                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2143                 if (dm_new_plane_state->dc_state) {
2144                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2145                         dc_plane_state_release(dm_new_plane_state->dc_state);
2146                         dm_new_plane_state->dc_state = NULL;
2147                 }
2148         }
2149
2150         drm_atomic_helper_resume(ddev, dm->cached_state);
2151
2152         dm->cached_state = NULL;
2153
2154         amdgpu_dm_irq_resume_late(adev);
2155
2156         amdgpu_dm_smu_write_watermarks_table(adev);
2157
2158         return 0;
2159 }
2160
2161 /**
2162  * DOC: DM Lifecycle
2163  *
2164  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2165  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2166  * the base driver's device list to be initialized and torn down accordingly.
2167  *
2168  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2169  */
2170
/* IP-block hooks wiring the DM into amdgpu's init/suspend/resume machinery. */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
2188
/* Version descriptor registered with the amdgpu IP-block framework (DCE). */
const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
2197
2198
2199 /**
2200  * DOC: atomic
2201  *
2202  * *WIP*
2203  */
2204
/* DRM mode-config callbacks: fb creation and atomic check/commit entry. */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.get_format_info = amd_get_format_info,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};
2212
/* Use the DM-specific commit tail so DC programs hardware during commits. */
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
2216
/*
 * update_connector_ext_caps() - Derive eDP AUX backlight capabilities from
 * the sink's DPCD extended caps and HDR metadata.
 * @aconnector: connector to read caps from; only eDP links are handled
 *
 * Fills dm->backlight_caps with aux_support plus the max/min input signal
 * levels computed from the sink's max_cll/min_cll values.
 */
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	/* Precomputed 50*2**(r/32) for r in 0..31; see comment below. */
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	/* AUX backlight control only applies to embedded DisplayPort. */
	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	/* Any of these DPCD bits means the panel backlight is AUX-driven. */
	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * For calculating this expression we may need float point precision;
	 * to avoid this complexity level, we take advantage that CV is divided
	 * by a constant. From the Euclids division algorithm, we know that CV
	 * can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * We just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expressions can be verified at
	 * pre_computed_values.
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	/* NOTE(review): with integer DIV_ROUND_CLOSEST, q here is 0 or 1 for
	 * typical one-byte min_cll, and q*q/100 rounds to 0 — so min is almost
	 * always 0. Looks like it loses the fractional term; verify intent. */
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}
2276
/*
 * amdgpu_dm_update_connector_after_detect() - Sync DRM connector state with
 * the result of a DC link detection.
 * @aconnector: connector whose dc_link was just (re)detected
 *
 * Transfers the link's local_sink into aconnector->dc_sink with correct
 * refcounting, updates the EDID property, probed modes, freesync caps, CEC
 * and (optionally) HDCP state. Handles the forced/emulated-sink case and
 * skips MST, which the drm_dp_mst framework manages itself.
 */
void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	/* Hold a local reference so the sink can't vanish while we work. */
	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				/* No physical sink: fall back to the emulated one. */
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			/* Sink came up without an EDID: clear ours and CEC's. */
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			drm_add_edid_modes(connector, aconnector->edid);

			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		/* Disconnect: drop EDID, modes, freesync caps and our sink ref. */
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	update_subconnector_property(aconnector);

	if (sink)
		dc_sink_release(sink);
}
2419
/*
 * Long-pulse HPD handler: runs (deferred, low-IRQ context) when a connector
 * is plugged or unplugged.  Re-detects the link, updates the drm connector
 * state and, for real (non-forced) connectors, notifies userspace.
 *
 * @param: the struct amdgpu_dm_connector this HPD source was registered
 *         with in register_hpd_handlers().
 */
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
#endif

	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* Drop HDCP state on any hotplug; flag it for re-negotiation later. */
	if (adev->dm.hdcp_workqueue) {
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
		dm_con_state->update_hdcp = true;
	}
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		/* Forced-on connector with nothing attached: emulate a sink. */
		emulated_link_detect(aconnector->dc_link);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		/* Only tell userspace when the connector is not forced. */
		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		/* Link went away entirely: make sure the stream is torn down. */
		if (new_connection_type == dc_connection_none &&
		    aconnector->dc_link->type == dc_connection_none)
			dm_set_dpms_off(aconnector->dc_link);

		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}
2477
2478 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2479 {
2480         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2481         uint8_t dret;
2482         bool new_irq_handled = false;
2483         int dpcd_addr;
2484         int dpcd_bytes_to_read;
2485
2486         const int max_process_count = 30;
2487         int process_count = 0;
2488
2489         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2490
2491         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2492                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2493                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2494                 dpcd_addr = DP_SINK_COUNT;
2495         } else {
2496                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2497                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2498                 dpcd_addr = DP_SINK_COUNT_ESI;
2499         }
2500
2501         dret = drm_dp_dpcd_read(
2502                 &aconnector->dm_dp_aux.aux,
2503                 dpcd_addr,
2504                 esi,
2505                 dpcd_bytes_to_read);
2506
2507         while (dret == dpcd_bytes_to_read &&
2508                 process_count < max_process_count) {
2509                 uint8_t retry;
2510                 dret = 0;
2511
2512                 process_count++;
2513
2514                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2515                 /* handle HPD short pulse irq */
2516                 if (aconnector->mst_mgr.mst_state)
2517                         drm_dp_mst_hpd_irq(
2518                                 &aconnector->mst_mgr,
2519                                 esi,
2520                                 &new_irq_handled);
2521
2522                 if (new_irq_handled) {
2523                         /* ACK at DPCD to notify down stream */
2524                         const int ack_dpcd_bytes_to_write =
2525                                 dpcd_bytes_to_read - 1;
2526
2527                         for (retry = 0; retry < 3; retry++) {
2528                                 uint8_t wret;
2529
2530                                 wret = drm_dp_dpcd_write(
2531                                         &aconnector->dm_dp_aux.aux,
2532                                         dpcd_addr + 1,
2533                                         &esi[1],
2534                                         ack_dpcd_bytes_to_write);
2535                                 if (wret == ack_dpcd_bytes_to_write)
2536                                         break;
2537                         }
2538
2539                         /* check if there is new irq to be handled */
2540                         dret = drm_dp_dpcd_read(
2541                                 &aconnector->dm_dp_aux.aux,
2542                                 dpcd_addr,
2543                                 esi,
2544                                 dpcd_bytes_to_read);
2545
2546                         new_irq_handled = false;
2547                 } else {
2548                         break;
2549                 }
2550         }
2551
2552         if (process_count == max_process_count)
2553                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2554 }
2555
/*
 * Short-pulse HPD (hpd_rx) handler: DP "IRQ" events such as link loss,
 * MST up/down messages, CP_IRQ (HDCP) and sink status changes.
 *
 * @param: the struct amdgpu_dm_connector this hpd_rx source was registered
 *         with in register_hpd_handlers().
 */
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	/* true => downstream port status changed and a re-detect is needed */
	bool result = false;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	union hpd_irq_data hpd_irq_data;

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));

	/*
	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);

	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
		(dc_link->type == dc_connection_mst_branch)) {
		/* MST sideband traffic: service ESI and skip the generic path. */
		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
			result = true;
			dm_handle_hpd_rx_irq(aconnector);
			goto out;
		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
			/* Down replies never trigger a re-detect below. */
			result = false;
			dm_handle_hpd_rx_irq(aconnector);
			goto out;
		}
	}

	mutex_lock(&adev->dm.dc_lock);
#ifdef CONFIG_DRM_AMD_DC_HDCP
	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
#else
	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
#endif
	mutex_unlock(&adev->dm.dc_lock);

out:
	if (result && !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			/* Forced connector with nothing attached: emulate a sink. */
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* CP_IRQ: hand HDCP content-protection events to the HDCP workqueue. */
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
	}
#endif

	/* Pairs with the conditional lock above: non-MST links only. */
	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}
2648
2649 static void register_hpd_handlers(struct amdgpu_device *adev)
2650 {
2651         struct drm_device *dev = adev_to_drm(adev);
2652         struct drm_connector *connector;
2653         struct amdgpu_dm_connector *aconnector;
2654         const struct dc_link *dc_link;
2655         struct dc_interrupt_params int_params = {0};
2656
2657         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2658         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2659
2660         list_for_each_entry(connector,
2661                         &dev->mode_config.connector_list, head) {
2662
2663                 aconnector = to_amdgpu_dm_connector(connector);
2664                 dc_link = aconnector->dc_link;
2665
2666                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2667                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2668                         int_params.irq_source = dc_link->irq_source_hpd;
2669
2670                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2671                                         handle_hpd_irq,
2672                                         (void *) aconnector);
2673                 }
2674
2675                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2676
2677                         /* Also register for DP short pulse (hpd_rx). */
2678                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2679                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2680
2681                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2682                                         handle_hpd_rx_irq,
2683                                         (void *) aconnector);
2684                 }
2685         }
2686 }
2687
2688 #if defined(CONFIG_DRM_AMD_DC_SI)
2689 /* Register IRQ sources and initialize IRQ callbacks */
/*
 * Register IRQ sources and initialize IRQ callbacks for DCE 6.x (SI) ASICs.
 * Wires VBLANK, page-flip and HPD interrupts from the base driver into the
 * DM interrupt layer.  Returns 0 on success or the amdgpu_irq_add_id()
 * error code on failure.
 */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		/* DCE6 VBLANK source ids are 1-based, hence i+1. */
		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i+1 , 0);

		/* Per-CRTC parameter slot, indexed by DC irq source. */
		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
2769 #endif
2770
2771 /* Register IRQ sources and initialize IRQ callbacks */
/*
 * Register IRQ sources and initialize IRQ callbacks for DCE 8.x-12.x ASICs.
 * Wires VBLANK, VUPDATE, page-flip and HPD interrupts from the base driver
 * into the DM interrupt layer.  Returns 0 on success or the
 * amdgpu_irq_add_id() error code on failure.
 */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	/* Vega and newer route display interrupts through the SOC15 IH client. */
	if (adev->asic_type >= CHIP_VEGA10)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* Per-CRTC parameter slot, indexed by DC irq source. */
		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
2875
2876 #if defined(CONFIG_DRM_AMD_DC_DCN)
2877 /* Register IRQ sources and initialize IRQ callbacks */
/*
 * Register IRQ sources and initialize IRQ callbacks for DCN 1.0+ ASICs.
 * On DCN the vblank-equivalent is VSTARTUP and the vupdate-equivalent is
 * VUPDATE_NO_LOCK; page-flip and HPD are wired as on DCE.  Returns 0 on
 * success or the amdgpu_irq_add_id() error code on failure.
 */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* Per-CRTC parameter slot, indexed by DC irq source. */
		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(
			adev, &int_params, dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
2989 #endif
2990
2991 /*
2992  * Acquires the lock for the atomic state object and returns
2993  * the new atomic state.
2994  *
2995  * This should only be called during atomic check.
2996  */
2997 static int dm_atomic_get_state(struct drm_atomic_state *state,
2998                                struct dm_atomic_state **dm_state)
2999 {
3000         struct drm_device *dev = state->dev;
3001         struct amdgpu_device *adev = drm_to_adev(dev);
3002         struct amdgpu_display_manager *dm = &adev->dm;
3003         struct drm_private_state *priv_state;
3004
3005         if (*dm_state)
3006                 return 0;
3007
3008         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3009         if (IS_ERR(priv_state))
3010                 return PTR_ERR(priv_state);
3011
3012         *dm_state = to_dm_atomic_state(priv_state);
3013
3014         return 0;
3015 }
3016
3017 static struct dm_atomic_state *
3018 dm_atomic_get_new_state(struct drm_atomic_state *state)
3019 {
3020         struct drm_device *dev = state->dev;
3021         struct amdgpu_device *adev = drm_to_adev(dev);
3022         struct amdgpu_display_manager *dm = &adev->dm;
3023         struct drm_private_obj *obj;
3024         struct drm_private_state *new_obj_state;
3025         int i;
3026
3027         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3028                 if (obj->funcs == dm->atomic_obj.funcs)
3029                         return to_dm_atomic_state(new_obj_state);
3030         }
3031
3032         return NULL;
3033 }
3034
3035 static struct drm_private_state *
3036 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3037 {
3038         struct dm_atomic_state *old_state, *new_state;
3039
3040         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3041         if (!new_state)
3042                 return NULL;
3043
3044         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3045
3046         old_state = to_dm_atomic_state(obj->state);
3047
3048         if (old_state && old_state->context)
3049                 new_state->context = dc_copy_state(old_state->context);
3050
3051         if (!new_state->context) {
3052                 kfree(new_state);
3053                 return NULL;
3054         }
3055
3056         return &new_state->base;
3057 }
3058
3059 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3060                                     struct drm_private_state *state)
3061 {
3062         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3063
3064         if (dm_state && dm_state->context)
3065                 dc_release_state(dm_state->context);
3066
3067         kfree(dm_state);
3068 }
3069
/* Private-object hooks for the DM atomic state (dc_state wrapper). */
static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};
3074
3075 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3076 {
3077         struct dm_atomic_state *state;
3078         int r;
3079
3080         adev->mode_info.mode_config_initialized = true;
3081
3082         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3083         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3084
3085         adev_to_drm(adev)->mode_config.max_width = 16384;
3086         adev_to_drm(adev)->mode_config.max_height = 16384;
3087
3088         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3089         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3090         /* indicates support for immediate flip */
3091         adev_to_drm(adev)->mode_config.async_page_flip = true;
3092
3093         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3094
3095         state = kzalloc(sizeof(*state), GFP_KERNEL);
3096         if (!state)
3097                 return -ENOMEM;
3098
3099         state->context = dc_create_state(adev->dm.dc);
3100         if (!state->context) {
3101                 kfree(state);
3102                 return -ENOMEM;
3103         }
3104
3105         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3106
3107         drm_atomic_private_obj_init(adev_to_drm(adev),
3108                                     &adev->dm.atomic_obj,
3109                                     &state->base,
3110                                     &dm_atomic_state_funcs);
3111
3112         r = amdgpu_display_modeset_create_props(adev);
3113         if (r) {
3114                 dc_release_state(state->context);
3115                 kfree(state);
3116                 return r;
3117         }
3118
3119         r = amdgpu_dm_audio_init(adev);
3120         if (r) {
3121                 dc_release_state(state->context);
3122                 kfree(state);
3123                 return r;
3124         }
3125
3126         return 0;
3127 }
3128
3129 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3130 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3131 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3132
3133 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3134         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3135
/*
 * Populate dm->backlight_caps with the panel's backlight input-signal range.
 * With ACPI, values come from the platform (ATIF); without ACPI, or when the
 * platform provides nothing, driver defaults are used.  AUX-controlled
 * backlights keep their existing caps untouched.
 */
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	/* Already queried once; caps don't change at runtime. */
	if (dm->backlight_caps.caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
	if (caps.caps_valid) {
		dm->backlight_caps.caps_valid = true;
		/* AUX backlights use nits-based caps set elsewhere. */
		if (caps.aux_support)
			return;
		dm->backlight_caps.min_input_signal = caps.min_input_signal;
		dm->backlight_caps.max_input_signal = caps.max_input_signal;
	} else {
		/* Platform gave us nothing: fall back to driver defaults. */
		dm->backlight_caps.min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps.max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps.aux_support)
		return;

	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
3167
3168 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3169 {
3170         bool rc;
3171
3172         if (!link)
3173                 return 1;
3174
3175         rc = dc_link_set_backlight_level_nits(link, true, brightness,
3176                                               AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3177
3178         return rc ? 0 : 1;
3179 }
3180
3181 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3182                                 unsigned *min, unsigned *max)
3183 {
3184         if (!caps)
3185                 return 0;
3186
3187         if (caps->aux_support) {
3188                 // Firmware limits are in nits, DC API wants millinits.
3189                 *max = 1000 * caps->aux_max_input_signal;
3190                 *min = 1000 * caps->aux_min_input_signal;
3191         } else {
3192                 // Firmware limits are 8-bit, PWM control is 16-bit.
3193                 *max = 0x101 * caps->max_input_signal;
3194                 *min = 0x101 * caps->min_input_signal;
3195         }
3196         return 1;
3197 }
3198
3199 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3200                                         uint32_t brightness)
3201 {
3202         unsigned min, max;
3203
3204         if (!get_brightness_range(caps, &min, &max))
3205                 return brightness;
3206
3207         // Rescale 0..255 to min..max
3208         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3209                                        AMDGPU_MAX_BL_LEVEL);
3210 }
3211
3212 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3213                                       uint32_t brightness)
3214 {
3215         unsigned min, max;
3216
3217         if (!get_brightness_range(caps, &min, &max))
3218                 return brightness;
3219
3220         if (brightness < min)
3221                 return 0;
3222         // Rescale min..max to 0..255
3223         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3224                                  max - min);
3225 }
3226
3227 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3228 {
3229         struct amdgpu_display_manager *dm = bl_get_data(bd);
3230         struct amdgpu_dm_backlight_caps caps;
3231         struct dc_link *link = NULL;
3232         u32 brightness;
3233         bool rc;
3234
3235         amdgpu_dm_update_backlight_caps(dm);
3236         caps = dm->backlight_caps;
3237
3238         link = (struct dc_link *)dm->backlight_link;
3239
3240         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3241         // Change brightness based on AUX property
3242         if (caps.aux_support)
3243                 return set_backlight_via_aux(link, brightness);
3244
3245         rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3246
3247         return rc ? 0 : 1;
3248 }
3249
3250 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3251 {
3252         struct amdgpu_display_manager *dm = bl_get_data(bd);
3253         int ret = dc_link_get_backlight_level(dm->backlight_link);
3254
3255         if (ret == DC_ERROR_UNEXPECTED)
3256                 return bd->props.brightness;
3257         return convert_brightness_to_user(&dm->backlight_caps, ret);
3258 }
3259
/* Backlight class-device operations backed by the DM helpers above. */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status	= amdgpu_dm_backlight_update_status,
};
3265
/*
 * Register a backlight class device named "amdgpu_bl<N>" for this DM
 * instance.  Registration failure is logged but not fatal; callers check
 * dm->backlight_dev to see whether a device was created.
 */
static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	/* Make sure min/max/aux caps are resolved before first use. */
	amdgpu_dm_update_backlight_caps(dm);

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	/* Device name is derived from the primary DRM minor index. */
	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 adev_to_drm(dm->adev)->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
						      adev_to_drm(dm->adev)->dev,
						      dm,
						      &amdgpu_dm_backlight_ops,
						      &props);

	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}
3292
3293 #endif
3294
3295 static int initialize_plane(struct amdgpu_display_manager *dm,
3296                             struct amdgpu_mode_info *mode_info, int plane_id,
3297                             enum drm_plane_type plane_type,
3298                             const struct dc_plane_cap *plane_cap)
3299 {
3300         struct drm_plane *plane;
3301         unsigned long possible_crtcs;
3302         int ret = 0;
3303
3304         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3305         if (!plane) {
3306                 DRM_ERROR("KMS: Failed to allocate plane\n");
3307                 return -ENOMEM;
3308         }
3309         plane->type = plane_type;
3310
3311         /*
3312          * HACK: IGT tests expect that the primary plane for a CRTC
3313          * can only have one possible CRTC. Only expose support for
3314          * any CRTC if they're not going to be used as a primary plane
3315          * for a CRTC - like overlay or underlay planes.
3316          */
3317         possible_crtcs = 1 << plane_id;
3318         if (plane_id >= dm->dc->caps.max_streams)
3319                 possible_crtcs = 0xff;
3320
3321         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3322
3323         if (ret) {
3324                 DRM_ERROR("KMS: Failed to initialize plane\n");
3325                 kfree(plane);
3326                 return ret;
3327         }
3328
3329         if (mode_info)
3330                 mode_info->planes[plane_id] = plane;
3331
3332         return ret;
3333 }
3334
3335
/*
 * Register a backlight device for links that drive an internal panel
 * (eDP/LVDS signal with a sink attached).  dm->backlight_link is only
 * set when registration produced a device.
 */
static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}
3356
3357
3358 /*
3359  * In this architecture, the association
3360  * connector -> encoder -> crtc
3361  * id not really requried. The crtc and connector will hold the
3362  * display_index as an abstraction to use with DAL component
3363  *
3364  * Returns 0 on success
3365  */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		/* Overlay must be a universal DCN plane that blends both ways
		 * and supports ARGB8888; anything else is skipped. */
		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	/* One CRTC per stream, each paired with its primary plane. */
	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		/* Forced connectors with no physical sink get an emulated
		 * link so a display can still be lit up. */
		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
				amdgpu_dm_set_psr_caps(link);
		}


	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_NAVI12:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	return 0;
fail:
	/*
	 * Only the most recently allocated connector/encoder are freed here;
	 * objects from earlier iterations were handed to DRM and are released
	 * through mode-config cleanup.  kfree(NULL) is a no-op.
	 *
	 * NOTE(review): on the connector_init failure path aencoder has
	 * already been through amdgpu_dm_encoder_init() and is freed without
	 * drm_encoder_cleanup() - verify whether DRM state leaks here.
	 */
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}
3553
3554 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3555 {
3556         drm_mode_config_cleanup(dm->ddev);
3557         drm_atomic_private_obj_fini(&dm->atomic_obj);
3558         return;
3559 }
3560
3561 /******************************************************************************
3562  * amdgpu_display_funcs functions
3563  *****************************************************************************/
3564
3565 /*
3566  * dm_bandwidth_update - program display watermarks
3567  *
3568  * @adev: amdgpu_device pointer
3569  *
3570  * Calculate and program the display watermarks and line buffer allocation.
3571  */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
	/* NOTE(review): intentionally a stub - bandwidth/watermark programming
	 * appears to be handled elsewhere by DC; confirm before implementing. */
}
3576
/*
 * amdgpu display-function table for the DC path.  Entries left NULL are
 * either never called with DC or handled by DAL/VBIOS parsing instead.
 */
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
3590
3591 #if defined(CONFIG_DEBUG_KERNEL_DC)
3592
3593 static ssize_t s3_debug_store(struct device *device,
3594                               struct device_attribute *attr,
3595                               const char *buf,
3596                               size_t count)
3597 {
3598         int ret;
3599         int s3_state;
3600         struct drm_device *drm_dev = dev_get_drvdata(device);
3601         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3602
3603         ret = kstrtoint(buf, 0, &s3_state);
3604
3605         if (ret == 0) {
3606                 if (s3_state) {
3607                         dm_resume(adev);
3608                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3609                 } else
3610                         dm_suspend(adev);
3611         }
3612
3613         return ret == 0 ? count : 0;
3614 }
3615
3616 DEVICE_ATTR_WO(s3_debug);
3617
3618 #endif
3619
/*
 * dm_early_init - amdgpu IP early-init hook for the display manager.
 *
 * Sets the per-ASIC CRTC/HPD/DIG counts in adev->mode_info, installs the
 * DM IRQ and display function tables, and (with CONFIG_DEBUG_KERNEL_DC)
 * creates the s3_debug sysfs attribute.
 * Returns 0 on success, -EINVAL for an unsupported ASIC.
 */
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_RENOIR:
	case CHIP_VANGOGH:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_NAVI14:
	case CHIP_DIMGREY_CAVEFISH:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	/* Keep any table already installed by earlier init code. */
	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}
3737
3738 static bool modeset_required(struct drm_crtc_state *crtc_state,
3739                              struct dc_stream_state *new_stream,
3740                              struct dc_stream_state *old_stream)
3741 {
3742         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3743 }
3744
3745 static bool modereset_required(struct drm_crtc_state *crtc_state)
3746 {
3747         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3748 }
3749
/* drm_encoder_funcs.destroy: release DRM encoder state, then the allocation. */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
3755
/* Encoder vtable: only destruction is needed; setup is done by DM/DAL. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
3759
3760
/*
 * Look up the DC per-format scaling limits for @fb and return them in
 * units of 1/1000 (1000 == 1.0 scaling).  *min_downscale receives the
 * maximum allowed downscale factor, *max_upscale the maximum upscale.
 */
static void get_min_max_dc_plane_scaling(struct drm_device *dev,
					 struct drm_framebuffer *fb,
					 int *min_downscale, int *max_upscale)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	/* Video (YUV) formats use the nv12 caps. */
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;
		break;

	/* 64bpp floating-point formats use the fp16 caps. */
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;
		break;

	/* Everything else falls back to the 32bpp RGB caps. */
	default:
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;
		break;
	}

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
	 * scaling factor of 1.0 == 1000 units.
	 */
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
}
3802
3803
/*
 * Translate a DRM plane state's src/dst rectangles into DC scaling info
 * and validate the implied scale factors against the per-format caps.
 * Returns 0 on success, -EINVAL for degenerate rectangles or scaling
 * outside the supported range.
 */
static int fill_dc_scaling_info(const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	/* Zero-sized source or destination rectangles are invalid. */
	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
					     &min_downscale, &max_upscale);
	} else {
		/* Fallback limits when no fb/device is attached yet. */
		min_downscale = 250;
		max_upscale = 16000;
	}

	/* Scale factors are in units of 1/1000 (1000 == 1.0). */
	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}
3867
/*
 * Decode GFX8-style AMDGPU_TILING_* flags into DC tiling info.
 * Only 1D/2D thin tiling is decoded; other array modes leave the
 * gfx8 fields zeroed except pipe_config, which is always copied.
 */
static void
fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
				 uint64_t tiling_flags)
{
	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}
3900
/*
 * Copy the GFX9+ address-config parameters (pipes, banks, shader engines,
 * etc.) from the device's gb_addr_config into DC tiling info.  num_pkrs
 * only exists on the listed GFX10.3-class ASICs.
 */
static void
fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
				  union dc_tiling_info *tiling_info)
{
	tiling_info->gfx9.num_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;
	tiling_info->gfx9.num_banks =
		adev->gfx.config.gb_addr_config_fields.num_banks;
	tiling_info->gfx9.pipe_interleave =
		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
	tiling_info->gfx9.num_shader_engines =
		adev->gfx.config.gb_addr_config_fields.num_se;
	tiling_info->gfx9.max_compressed_frags =
		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
	tiling_info->gfx9.num_rb_per_se =
		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
	tiling_info->gfx9.shaderEnable = 1;
	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
	    adev->asic_type == CHIP_VANGOGH)
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}
3924
/*
 * Check whether the requested DCC (delta color compression) configuration
 * is supported by DC for this surface.  Returns 0 when DCC is disabled or
 * the configuration is accepted, -EINVAL otherwise.
 */
static int
validate_dcc(struct amdgpu_device *adev,
	     const enum surface_pixel_format format,
	     const enum dc_rotation_angle rotation,
	     const union dc_tiling_info *tiling_info,
	     const struct dc_plane_dcc_param *dcc,
	     const struct dc_plane_address *address,
	     const struct plane_size *plane_size)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	/* Nothing to validate when DCC is not requested. */
	if (!dcc->enable)
		return 0;

	/* Video formats are rejected, and DC must expose the cap callback. */
	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
	    !dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	/* HW requiring independent 64B blocks cannot take combined blocks. */
	if (dcc->independent_64b_blks == 0 &&
	    output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	return 0;
}
3970
3971 static bool
3972 modifier_has_dcc(uint64_t modifier)
3973 {
3974         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3975 }
3976
3977 static unsigned
3978 modifier_gfx9_swizzle_mode(uint64_t modifier)
3979 {
3980         if (modifier == DRM_FORMAT_MOD_LINEAR)
3981                 return 0;
3982
3983         return AMD_FMT_MOD_GET(TILE, modifier);
3984 }
3985
3986 static const struct drm_format_info *
3987 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3988 {
3989         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
3990 }
3991
3992 static void
3993 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3994                                     union dc_tiling_info *tiling_info,
3995                                     uint64_t modifier)
3996 {
3997         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3998         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3999         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4000         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4001
4002         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4003
4004         if (!IS_AMD_FMT_MOD(modifier))
4005                 return;
4006
4007         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4008         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4009
4010         if (adev->family >= AMDGPU_FAMILY_NV) {
4011                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4012         } else {
4013                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4014
4015                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4016         }
4017 }
4018
/*
 * Micro-tile swizzle kind, taken from the low two bits of the gfx9+ swizzle
 * mode (see the `& 3` in dm_plane_format_mod_supported()).
 */
enum dm_micro_swizzle {
        MICRO_SWIZZLE_Z = 0,
        MICRO_SWIZZLE_S = 1,
        MICRO_SWIZZLE_D = 2,
        MICRO_SWIZZLE_R = 3
};
4025
4026 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4027                                           uint32_t format,
4028                                           uint64_t modifier)
4029 {
4030         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4031         const struct drm_format_info *info = drm_format_info(format);
4032
4033         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4034
4035         if (!info)
4036                 return false;
4037
4038         /*
4039          * We always have to allow this modifier, because core DRM still
4040          * checks LINEAR support if userspace does not provide modifers.
4041          */
4042         if (modifier == DRM_FORMAT_MOD_LINEAR)
4043                 return true;
4044
4045         /*
4046          * The arbitrary tiling support for multiplane formats has not been hooked
4047          * up.
4048          */
4049         if (info->num_planes > 1)
4050                 return false;
4051
4052         /*
4053          * For D swizzle the canonical modifier depends on the bpp, so check
4054          * it here.
4055          */
4056         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4057             adev->family >= AMDGPU_FAMILY_NV) {
4058                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4059                         return false;
4060         }
4061
4062         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4063             info->cpp[0] < 8)
4064                 return false;
4065
4066         if (modifier_has_dcc(modifier)) {
4067                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4068                 if (info->cpp[0] != 4)
4069                         return false;
4070         }
4071
4072         return true;
4073 }
4074
4075 static void
4076 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4077 {
4078         if (!*mods)
4079                 return;
4080
4081         if (*cap - *size < 1) {
4082                 uint64_t new_cap = *cap * 2;
4083                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4084
4085                 if (!new_mods) {
4086                         kfree(*mods);
4087                         *mods = NULL;
4088                         return;
4089                 }
4090
4091                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4092                 kfree(*mods);
4093                 *mods = new_mods;
4094                 *cap = new_cap;
4095         }
4096
4097         (*mods)[*size] = mod;
4098         *size += 1;
4099 }
4100
/*
 * Publish the GFX9 (Vega/Raven) swizzle modes as DRM format modifiers.
 *
 * The XOR bit counts are computed from this chip's pipe/bank configuration
 * so the advertised modifiers match the layouts the hardware produces. DCC
 * variants are only added on the Raven family (which has DCN display).
 */
static void
add_gfx9_modifiers(const struct amdgpu_device *adev,
                   uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
        int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
        int pipe_xor_bits = min(8, pipes +
                                ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
        int bank_xor_bits = min(8 - pipe_xor_bits,
                                ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
        int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
                 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);


        if (adev->family == AMDGPU_FAMILY_RV) {
                /* Raven2 and later */
                bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

                /*
                 * No _D DCC swizzles yet because we only allow 32bpp, which
                 * doesn't support _D on DCN
                 */

                /* Non-retiled DCC, with and without constant encoding. */
                if (has_constant_encode) {
                        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
                                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
                                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                                    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
                                    AMD_FMT_MOD_SET(DCC, 1) |
                                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
                                    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
                                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
                }

                add_modifier(mods, size, capacity, AMD_FMT_MOD |
                            AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
                            AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
                            AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                            AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
                            AMD_FMT_MOD_SET(DCC, 1) |
                            AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
                            AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
                            AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

                /* DCC_RETILE variants additionally encode the RB/pipe counts. */
                if (has_constant_encode) {
                        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
                                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
                                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                                    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
                                    AMD_FMT_MOD_SET(DCC, 1) |
                                    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
                                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
                                    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |

                                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
                                    AMD_FMT_MOD_SET(RB, rb) |
                                    AMD_FMT_MOD_SET(PIPE, pipes));
                }

                add_modifier(mods, size, capacity, AMD_FMT_MOD |
                            AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
                            AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
                            AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                            AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
                            AMD_FMT_MOD_SET(DCC, 1) |
                            AMD_FMT_MOD_SET(DCC_RETILE, 1) |
                            AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
                            AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
                            AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
                            AMD_FMT_MOD_SET(RB, rb) |
                            AMD_FMT_MOD_SET(PIPE, pipes));
        }

        /*
         * Only supported for 64bpp on Raven, will be filtered on format in
         * dm_plane_format_mod_supported.
         */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

        if (adev->family == AMDGPU_FAMILY_RV) {
                add_modifier(mods, size, capacity, AMD_FMT_MOD |
                            AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
                            AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
                            AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                            AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
        }

        /*
         * Only supported for 64bpp on Raven, will be filtered on format in
         * dm_plane_format_mod_supported.
         */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

        if (adev->family == AMDGPU_FAMILY_RV) {
                add_modifier(mods, size, capacity, AMD_FMT_MOD |
                            AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
                            AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
        }
}
4207
/*
 * Publish the GFX10.1 (Navi 1x) swizzle modes as DRM format modifiers,
 * ordered roughly from most to least preferred (DCC first, linear last is
 * appended by the caller).
 */
static void
add_gfx10_1_modifiers(const struct amdgpu_device *adev,
                      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
        int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

        /* R_X with DCC, independent 64B blocks. */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                    AMD_FMT_MOD_SET(DCC, 1) |
                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
                    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

        /* Same, but with a retile (displayable) DCC surface. */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                    AMD_FMT_MOD_SET(DCC, 1) |
                    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
                    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

        /* Plain (no DCC) R_X and S_X layouts. */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));


        /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
4253
/*
 * Publish the GFX10.3 (Sienna Cichlid and later, RB+) swizzle modes as DRM
 * format modifiers. Unlike gfx10.1 these also encode the packers count and
 * use independent 64B+128B DCC blocks with 128B max compressed block size.
 */
static void
add_gfx10_3_modifiers(const struct amdgpu_device *adev,
                      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
        int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
        int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

        /* R_X with DCC. */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                    AMD_FMT_MOD_SET(PACKERS, pkrs) |
                    AMD_FMT_MOD_SET(DCC, 1) |
                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
                    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

        /* R_X with retiled DCC. */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                    AMD_FMT_MOD_SET(PACKERS, pkrs) |
                    AMD_FMT_MOD_SET(DCC, 1) |
                    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
                    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

        /* Plain (no DCC) R_X and S_X layouts. */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                    AMD_FMT_MOD_SET(PACKERS, pkrs));

        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                    AMD_FMT_MOD_SET(PACKERS, pkrs));

        /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
4305
4306 static int
4307 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4308 {
4309         uint64_t size = 0, capacity = 128;
4310         *mods = NULL;
4311
4312         /* We have not hooked up any pre-GFX9 modifiers. */
4313         if (adev->family < AMDGPU_FAMILY_AI)
4314                 return 0;
4315
4316         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4317
4318         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4319                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4320                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4321                 return *mods ? 0 : -ENOMEM;
4322         }
4323
4324         switch (adev->family) {
4325         case AMDGPU_FAMILY_AI:
4326         case AMDGPU_FAMILY_RV:
4327                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4328                 break;
4329         case AMDGPU_FAMILY_NV:
4330         case AMDGPU_FAMILY_VGH:
4331                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4332                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4333                 else
4334                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4335                 break;
4336         }
4337
4338         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4339
4340         /* INVALID marks the end of the list. */
4341         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4342
4343         if (!*mods)
4344                 return -ENOMEM;
4345
4346         return 0;
4347 }
4348
4349 static int
4350 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4351                                           const struct amdgpu_framebuffer *afb,
4352                                           const enum surface_pixel_format format,
4353                                           const enum dc_rotation_angle rotation,
4354                                           const struct plane_size *plane_size,
4355                                           union dc_tiling_info *tiling_info,
4356                                           struct dc_plane_dcc_param *dcc,
4357                                           struct dc_plane_address *address,
4358                                           const bool force_disable_dcc)
4359 {
4360         const uint64_t modifier = afb->base.modifier;
4361         int ret;
4362
4363         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4364         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4365
4366         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4367                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4368
4369                 dcc->enable = 1;
4370                 dcc->meta_pitch = afb->base.pitches[1];
4371                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4372
4373                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4374                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4375         }
4376
4377         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4378         if (ret)
4379                 return ret;
4380
4381         return 0;
4382 }
4383
/*
 * Fill DC's tiling, size, DCC and address descriptors for a framebuffer.
 *
 * All four output structs are zeroed first. Returns 0 on success or a
 * negative errno when the gfx9+ modifier/DCC combination is rejected.
 */
static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
                             const struct amdgpu_framebuffer *afb,
                             const enum surface_pixel_format format,
                             const enum dc_rotation_angle rotation,
                             const uint64_t tiling_flags,
                             union dc_tiling_info *tiling_info,
                             struct plane_size *plane_size,
                             struct dc_plane_dcc_param *dcc,
                             struct dc_plane_address *address,
                             bool tmz_surface,
                             bool force_disable_dcc)
{
        const struct drm_framebuffer *fb = &afb->base;
        int ret;

        memset(tiling_info, 0, sizeof(*tiling_info));
        memset(plane_size, 0, sizeof(*plane_size));
        memset(dcc, 0, sizeof(*dcc));
        memset(address, 0, sizeof(*address));

        address->tmz_surface = tmz_surface;

        if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
                /* Single-plane (graphics/RGB) surface. */
                uint64_t addr = afb->address + fb->offsets[0];

                plane_size->surface_size.x = 0;
                plane_size->surface_size.y = 0;
                plane_size->surface_size.width = fb->width;
                plane_size->surface_size.height = fb->height;
                /* DC pitches are in pixels, DRM pitches in bytes. */
                plane_size->surface_pitch =
                        fb->pitches[0] / fb->format->cpp[0];

                address->type = PLN_ADDR_TYPE_GRAPHICS;
                address->grph.addr.low_part = lower_32_bits(addr);
                address->grph.addr.high_part = upper_32_bits(addr);
        } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
                /* Two-plane video surface: luma in plane 0, chroma in plane 1. */
                uint64_t luma_addr = afb->address + fb->offsets[0];
                uint64_t chroma_addr = afb->address + fb->offsets[1];

                plane_size->surface_size.x = 0;
                plane_size->surface_size.y = 0;
                plane_size->surface_size.width = fb->width;
                plane_size->surface_size.height = fb->height;
                plane_size->surface_pitch =
                        fb->pitches[0] / fb->format->cpp[0];

                plane_size->chroma_size.x = 0;
                plane_size->chroma_size.y = 0;
                /* TODO: set these based on surface format */
                plane_size->chroma_size.width = fb->width / 2;
                plane_size->chroma_size.height = fb->height / 2;

                plane_size->chroma_pitch =
                        fb->pitches[1] / fb->format->cpp[1];

                address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
                address->video_progressive.luma_addr.low_part =
                        lower_32_bits(luma_addr);
                address->video_progressive.luma_addr.high_part =
                        upper_32_bits(luma_addr);
                address->video_progressive.chroma_addr.low_part =
                        lower_32_bits(chroma_addr);
                address->video_progressive.chroma_addr.high_part =
                        upper_32_bits(chroma_addr);
        }

        /* GFX9+ tiling comes from the format modifier; older ASICs use flags. */
        if (adev->family >= AMDGPU_FAMILY_AI) {
                ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
                                                                rotation, plane_size,
                                                                tiling_info, dcc,
                                                                address,
                                                                force_disable_dcc);
                if (ret)
                        return ret;
        } else {
                fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
        }

        return 0;
}
4465
4466 static void
4467 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4468                                bool *per_pixel_alpha, bool *global_alpha,
4469                                int *global_alpha_value)
4470 {
4471         *per_pixel_alpha = false;
4472         *global_alpha = false;
4473         *global_alpha_value = 0xff;
4474
4475         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4476                 return;
4477
4478         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4479                 static const uint32_t alpha_formats[] = {
4480                         DRM_FORMAT_ARGB8888,
4481                         DRM_FORMAT_RGBA8888,
4482                         DRM_FORMAT_ABGR8888,
4483                 };
4484                 uint32_t format = plane_state->fb->format->format;
4485                 unsigned int i;
4486
4487                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4488                         if (format == alpha_formats[i]) {
4489                                 *per_pixel_alpha = true;
4490                                 break;
4491                         }
4492                 }
4493         }
4494
4495         if (plane_state->alpha < 0xffff) {
4496                 *global_alpha = true;
4497                 *global_alpha_value = plane_state->alpha >> 8;
4498         }
4499 }
4500
4501 static int
4502 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4503                             const enum surface_pixel_format format,
4504                             enum dc_color_space *color_space)
4505 {
4506         bool full_range;
4507
4508         *color_space = COLOR_SPACE_SRGB;
4509
4510         /* DRM color properties only affect non-RGB formats. */
4511         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4512                 return 0;
4513
4514         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4515
4516         switch (plane_state->color_encoding) {
4517         case DRM_COLOR_YCBCR_BT601:
4518                 if (full_range)
4519                         *color_space = COLOR_SPACE_YCBCR601;
4520                 else
4521                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4522                 break;
4523
4524         case DRM_COLOR_YCBCR_BT709:
4525                 if (full_range)
4526                         *color_space = COLOR_SPACE_YCBCR709;
4527                 else
4528                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4529                 break;
4530
4531         case DRM_COLOR_YCBCR_BT2020:
4532                 if (full_range)
4533                         *color_space = COLOR_SPACE_2020_YCBCR;
4534                 else
4535                         return -EINVAL;
4536                 break;
4537
4538         default:
4539                 return -EINVAL;
4540         }
4541
4542         return 0;
4543 }
4544
/*
 * Translate a DRM plane state and its framebuffer into DC's plane_info and
 * surface address structures.
 *
 * Returns 0 on success, -EINVAL for an unsupported pixel format, or the
 * negative errno from the color/buffer attribute helpers.
 */
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
                            const struct drm_plane_state *plane_state,
                            const uint64_t tiling_flags,
                            struct dc_plane_info *plane_info,
                            struct dc_plane_address *address,
                            bool tmz_surface,
                            bool force_disable_dcc)
{
        const struct drm_framebuffer *fb = plane_state->fb;
        const struct amdgpu_framebuffer *afb =
                to_amdgpu_framebuffer(plane_state->fb);
        struct drm_format_name_buf format_name;
        int ret;

        memset(plane_info, 0, sizeof(*plane_info));

        /* Map the DRM fourcc onto DC's surface pixel format. */
        switch (fb->format->format) {
        case DRM_FORMAT_C8:
                plane_info->format =
                        SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
                break;
        case DRM_FORMAT_RGB565:
                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
                break;
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ARGB8888:
                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
                break;
        case DRM_FORMAT_XRGB2101010:
        case DRM_FORMAT_ARGB2101010:
                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
                break;
        case DRM_FORMAT_XBGR2101010:
        case DRM_FORMAT_ABGR2101010:
                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
                break;
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_ABGR8888:
                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
                break;
        case DRM_FORMAT_NV21:
                plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
                break;
        case DRM_FORMAT_NV12:
                plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
                break;
        case DRM_FORMAT_P010:
                plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
                break;
        case DRM_FORMAT_XRGB16161616F:
        case DRM_FORMAT_ARGB16161616F:
                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
                break;
        case DRM_FORMAT_XBGR16161616F:
        case DRM_FORMAT_ABGR16161616F:
                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
                break;
        default:
                DRM_ERROR(
                        "Unsupported screen format %s\n",
                        drm_get_format_name(fb->format->format, &format_name));
                return -EINVAL;
        }

        /* Only the rotation part of the property matters; reflection is ignored. */
        switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
        case DRM_MODE_ROTATE_0:
                plane_info->rotation = ROTATION_ANGLE_0;
                break;
        case DRM_MODE_ROTATE_90:
                plane_info->rotation = ROTATION_ANGLE_90;
                break;
        case DRM_MODE_ROTATE_180:
                plane_info->rotation = ROTATION_ANGLE_180;
                break;
        case DRM_MODE_ROTATE_270:
                plane_info->rotation = ROTATION_ANGLE_270;
                break;
        default:
                plane_info->rotation = ROTATION_ANGLE_0;
                break;
        }

        plane_info->visible = true;
        plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

        plane_info->layer_index = 0;

        ret = fill_plane_color_attributes(plane_state, plane_info->format,
                                          &plane_info->color_space);
        if (ret)
                return ret;

        ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
                                           plane_info->rotation, tiling_flags,
                                           &plane_info->tiling_info,
                                           &plane_info->plane_size,
                                           &plane_info->dcc, address, tmz_surface,
                                           force_disable_dcc);
        if (ret)
                return ret;

        fill_blending_from_plane_state(
                plane_state, &plane_info->per_pixel_alpha,
                &plane_info->global_alpha, &plane_info->global_alpha_value);

        return 0;
}
4653
4654 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4655                                     struct dc_plane_state *dc_plane_state,
4656                                     struct drm_plane_state *plane_state,
4657                                     struct drm_crtc_state *crtc_state)
4658 {
4659         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4660         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4661         struct dc_scaling_info scaling_info;
4662         struct dc_plane_info plane_info;
4663         int ret;
4664         bool force_disable_dcc = false;
4665
4666         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4667         if (ret)
4668                 return ret;
4669
4670         dc_plane_state->src_rect = scaling_info.src_rect;
4671         dc_plane_state->dst_rect = scaling_info.dst_rect;
4672         dc_plane_state->clip_rect = scaling_info.clip_rect;
4673         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4674
4675         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4676         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4677                                           afb->tiling_flags,
4678                                           &plane_info,
4679                                           &dc_plane_state->address,
4680                                           afb->tmz_surface,
4681                                           force_disable_dcc);
4682         if (ret)
4683                 return ret;
4684
4685         dc_plane_state->format = plane_info.format;
4686         dc_plane_state->color_space = plane_info.color_space;
4687         dc_plane_state->format = plane_info.format;
4688         dc_plane_state->plane_size = plane_info.plane_size;
4689         dc_plane_state->rotation = plane_info.rotation;
4690         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4691         dc_plane_state->stereo_format = plane_info.stereo_format;
4692         dc_plane_state->tiling_info = plane_info.tiling_info;
4693         dc_plane_state->visible = plane_info.visible;
4694         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4695         dc_plane_state->global_alpha = plane_info.global_alpha;
4696         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4697         dc_plane_state->dcc = plane_info.dcc;
4698         dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4699
4700         /*
4701          * Always set input transfer function, since plane state is refreshed
4702          * every time.
4703          */
4704         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4705         if (ret)
4706                 return ret;
4707
4708         return 0;
4709 }
4710
4711 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4712                                            const struct dm_connector_state *dm_state,
4713                                            struct dc_stream_state *stream)
4714 {
4715         enum amdgpu_rmx_type rmx_type;
4716
4717         struct rect src = { 0 }; /* viewport in composition space*/
4718         struct rect dst = { 0 }; /* stream addressable area */
4719
4720         /* no mode. nothing to be done */
4721         if (!mode)
4722                 return;
4723
4724         /* Full screen scaling by default */
4725         src.width = mode->hdisplay;
4726         src.height = mode->vdisplay;
4727         dst.width = stream->timing.h_addressable;
4728         dst.height = stream->timing.v_addressable;
4729
4730         if (dm_state) {
4731                 rmx_type = dm_state->scaling;
4732                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4733                         if (src.width * dst.height <
4734                                         src.height * dst.width) {
4735                                 /* height needs less upscaling/more downscaling */
4736                                 dst.width = src.width *
4737                                                 dst.height / src.height;
4738                         } else {
4739                                 /* width needs less upscaling/more downscaling */
4740                                 dst.height = src.height *
4741                                                 dst.width / src.width;
4742                         }
4743                 } else if (rmx_type == RMX_CENTER) {
4744                         dst = src;
4745                 }
4746
4747                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4748                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4749
4750                 if (dm_state->underscan_enable) {
4751                         dst.x += dm_state->underscan_hborder / 2;
4752                         dst.y += dm_state->underscan_vborder / 2;
4753                         dst.width -= dm_state->underscan_hborder;
4754                         dst.height -= dm_state->underscan_vborder;
4755                 }
4756         }
4757
4758         stream->src = src;
4759         stream->dst = dst;
4760
4761         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4762                         dst.x, dst.y, dst.width, dst.height);
4763
4764 }
4765
4766 static enum dc_color_depth
4767 convert_color_depth_from_display_info(const struct drm_connector *connector,
4768                                       bool is_y420, int requested_bpc)
4769 {
4770         uint8_t bpc;
4771
4772         if (is_y420) {
4773                 bpc = 8;
4774
4775                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4776                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4777                         bpc = 16;
4778                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4779                         bpc = 12;
4780                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4781                         bpc = 10;
4782         } else {
4783                 bpc = (uint8_t)connector->display_info.bpc;
4784                 /* Assume 8 bpc by default if no bpc is specified. */
4785                 bpc = bpc ? bpc : 8;
4786         }
4787
4788         if (requested_bpc > 0) {
4789                 /*
4790                  * Cap display bpc based on the user requested value.
4791                  *
4792                  * The value for state->max_bpc may not correctly updated
4793                  * depending on when the connector gets added to the state
4794                  * or if this was called outside of atomic check, so it
4795                  * can't be used directly.
4796                  */
4797                 bpc = min_t(u8, bpc, requested_bpc);
4798
4799                 /* Round down to the nearest even number. */
4800                 bpc = bpc - (bpc & 1);
4801         }
4802
4803         switch (bpc) {
4804         case 0:
4805                 /*
4806                  * Temporary Work around, DRM doesn't parse color depth for
4807                  * EDID revision before 1.4
4808                  * TODO: Fix edid parsing
4809                  */
4810                 return COLOR_DEPTH_888;
4811         case 6:
4812                 return COLOR_DEPTH_666;
4813         case 8:
4814                 return COLOR_DEPTH_888;
4815         case 10:
4816                 return COLOR_DEPTH_101010;
4817         case 12:
4818                 return COLOR_DEPTH_121212;
4819         case 14:
4820                 return COLOR_DEPTH_141414;
4821         case 16:
4822                 return COLOR_DEPTH_161616;
4823         default:
4824                 return COLOR_DEPTH_UNDEFINED;
4825         }
4826 }
4827
/*
 * Convert a DRM picture aspect ratio to the DC equivalent. Both enums are
 * defined to follow the HDMI specification, so a plain cast suffices.
 */
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}
4834
4835 static enum dc_color_space
4836 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4837 {
4838         enum dc_color_space color_space = COLOR_SPACE_SRGB;
4839
4840         switch (dc_crtc_timing->pixel_encoding) {
4841         case PIXEL_ENCODING_YCBCR422:
4842         case PIXEL_ENCODING_YCBCR444:
4843         case PIXEL_ENCODING_YCBCR420:
4844         {
4845                 /*
4846                  * 27030khz is the separation point between HDTV and SDTV
4847                  * according to HDMI spec, we use YCbCr709 and YCbCr601
4848                  * respectively
4849                  */
4850                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4851                         if (dc_crtc_timing->flags.Y_ONLY)
4852                                 color_space =
4853                                         COLOR_SPACE_YCBCR709_LIMITED;
4854                         else
4855                                 color_space = COLOR_SPACE_YCBCR709;
4856                 } else {
4857                         if (dc_crtc_timing->flags.Y_ONLY)
4858                                 color_space =
4859                                         COLOR_SPACE_YCBCR601_LIMITED;
4860                         else
4861                                 color_space = COLOR_SPACE_YCBCR601;
4862                 }
4863
4864         }
4865         break;
4866         case PIXEL_ENCODING_RGB:
4867                 color_space = COLOR_SPACE_SRGB;
4868                 break;
4869
4870         default:
4871                 WARN_ON(1);
4872                 break;
4873         }
4874
4875         return color_space;
4876 }
4877
4878 static bool adjust_colour_depth_from_display_info(
4879         struct dc_crtc_timing *timing_out,
4880         const struct drm_display_info *info)
4881 {
4882         enum dc_color_depth depth = timing_out->display_color_depth;
4883         int normalized_clk;
4884         do {
4885                 normalized_clk = timing_out->pix_clk_100hz / 10;
4886                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4887                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4888                         normalized_clk /= 2;
4889                 /* Adjusting pix clock following on HDMI spec based on colour depth */
4890                 switch (depth) {
4891                 case COLOR_DEPTH_888:
4892                         break;
4893                 case COLOR_DEPTH_101010:
4894                         normalized_clk = (normalized_clk * 30) / 24;
4895                         break;
4896                 case COLOR_DEPTH_121212:
4897                         normalized_clk = (normalized_clk * 36) / 24;
4898                         break;
4899                 case COLOR_DEPTH_161616:
4900                         normalized_clk = (normalized_clk * 48) / 24;
4901                         break;
4902                 default:
4903                         /* The above depths are the only ones valid for HDMI. */
4904                         return false;
4905                 }
4906                 if (normalized_clk <= info->max_tmds_clock) {
4907                         timing_out->display_color_depth = depth;
4908                         return true;
4909                 }
4910         } while (--depth > COLOR_DEPTH_666);
4911         return false;
4912 }
4913
/*
 * Translate a DRM display mode plus connector capabilities into the DC
 * stream timing and colour properties. When @old_stream is non-NULL, its
 * VIC and sync polarities are reused instead of being re-derived from
 * @mode_in (used when only scaling changed — see the caller).
 */
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	/*
	 * Pixel encoding: YCbCr 4:2:0 when the mode requires it (or the
	 * connector forces it), YCbCr 4:4:4 when the HDMI sink advertises
	 * it, RGB otherwise.
	 */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if(old_stream) {
		/* Keep the VIC and sync polarities of the previous timings. */
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	/*
	 * For HDMI, take the VIC/HDMI-VIC from the AVI and vendor infoframes
	 * built for this mode; this intentionally overrides the VIC chosen
	 * above.
	 */
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	/* Copy the hardware (crtc_*) timing into the DC timing structure. */
	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		/*
		 * If the clock does not fit the sink's TMDS limit even after
		 * reducing colour depth, retry as YCbCr 4:2:0 (which halves
		 * the normalized clock — see
		 * adjust_colour_depth_from_display_info()).
		 */
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}
5003
5004 static void fill_audio_info(struct audio_info *audio_info,
5005                             const struct drm_connector *drm_connector,
5006                             const struct dc_sink *dc_sink)
5007 {
5008         int i = 0;
5009         int cea_revision = 0;
5010         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5011
5012         audio_info->manufacture_id = edid_caps->manufacturer_id;
5013         audio_info->product_id = edid_caps->product_id;
5014
5015         cea_revision = drm_connector->display_info.cea_rev;
5016
5017         strscpy(audio_info->display_name,
5018                 edid_caps->display_name,
5019                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5020
5021         if (cea_revision >= 3) {
5022                 audio_info->mode_count = edid_caps->audio_mode_count;
5023
5024                 for (i = 0; i < audio_info->mode_count; ++i) {
5025                         audio_info->modes[i].format_code =
5026                                         (enum audio_format_code)
5027                                         (edid_caps->audio_modes[i].format_code);
5028                         audio_info->modes[i].channel_count =
5029                                         edid_caps->audio_modes[i].channel_count;
5030                         audio_info->modes[i].sample_rates.all =
5031                                         edid_caps->audio_modes[i].sample_rate;
5032                         audio_info->modes[i].sample_size =
5033                                         edid_caps->audio_modes[i].sample_size;
5034                 }
5035         }
5036
5037         audio_info->flags.all = edid_caps->speaker_flags;
5038
5039         /* TODO: We only check for the progressive mode, check for interlace mode too */
5040         if (drm_connector->latency_present[0]) {
5041                 audio_info->video_latency = drm_connector->video_latency[0];
5042                 audio_info->audio_latency = drm_connector->audio_latency[0];
5043         }
5044
5045         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5046
5047 }
5048
5049 static void
5050 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5051                                       struct drm_display_mode *dst_mode)
5052 {
5053         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5054         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5055         dst_mode->crtc_clock = src_mode->crtc_clock;
5056         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5057         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5058         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5059         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5060         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5061         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5062         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5063         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5064         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5065         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5066         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5067 }
5068
5069 static void
5070 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5071                                         const struct drm_display_mode *native_mode,
5072                                         bool scale_enabled)
5073 {
5074         if (scale_enabled) {
5075                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5076         } else if (native_mode->clock == drm_mode->clock &&
5077                         native_mode->htotal == drm_mode->htotal &&
5078                         native_mode->vtotal == drm_mode->vtotal) {
5079                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5080         } else {
5081                 /* no scaling nor amdgpu inserted, no need to patch */
5082         }
5083 }
5084
5085 static struct dc_sink *
5086 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5087 {
5088         struct dc_sink_init_data sink_init_data = { 0 };
5089         struct dc_sink *sink = NULL;
5090         sink_init_data.link = aconnector->dc_link;
5091         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5092
5093         sink = dc_sink_create(&sink_init_data);
5094         if (!sink) {
5095                 DRM_ERROR("Failed to create sink!\n");
5096                 return NULL;
5097         }
5098         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5099
5100         return sink;
5101 }
5102
5103 static void set_multisync_trigger_params(
5104                 struct dc_stream_state *stream)
5105 {
5106         if (stream->triggered_crtc_reset.enabled) {
5107                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5108                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5109         }
5110 }
5111
5112 static void set_master_stream(struct dc_stream_state *stream_set[],
5113                               int stream_count)
5114 {
5115         int j, highest_rfr = 0, master_stream = 0;
5116
5117         for (j = 0;  j < stream_count; j++) {
5118                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5119                         int refresh_rate = 0;
5120
5121                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5122                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5123                         if (refresh_rate > highest_rfr) {
5124                                 highest_rfr = refresh_rate;
5125                                 master_stream = j;
5126                         }
5127                 }
5128         }
5129         for (j = 0;  j < stream_count; j++) {
5130                 if (stream_set[j])
5131                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5132         }
5133 }
5134
5135 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5136 {
5137         int i = 0;
5138
5139         if (context->stream_count < 2)
5140                 return;
5141         for (i = 0; i < context->stream_count ; i++) {
5142                 if (!context->streams[i])
5143                         continue;
5144                 /*
5145                  * TODO: add a function to read AMD VSDB bits and set
5146                  * crtc_sync_master.multi_sync_enabled flag
5147                  * For now it's set to false
5148                  */
5149                 set_multisync_trigger_params(context->streams[i]);
5150         }
5151         set_master_stream(context->streams, context->stream_count);
5152 }
5153
/*
 * Build a dc_stream_state for @aconnector driving @drm_mode. Falls back to
 * a virtual ("fake") sink when no sink is attached, optionally inherits
 * VIC/polarities from @old_stream, and configures DSC and PSR-related
 * infopackets where the link supports them. Returns a stream the caller
 * owns a reference to, or NULL on failure.
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
	uint32_t link_bandwidth_kbps;
#endif
	struct dc_sink *sink = NULL;
	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	/*
	 * With no sink attached, create a virtual one so stream creation can
	 * still proceed. Either way we hold a sink reference that is dropped
	 * at "finish".
	 */
	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	/* Fall back to the first listed mode if none is marked preferred. */
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and refresh rate didn't change,
	 * we copy the vic and polarities of the old timings.
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL, requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream, requested_bpc);

	stream->timing.flags.DSC = 0;

	/* DSC is only considered for DisplayPort sinks. */
	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
				      &dsc_caps);
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
			/* Set DSC policy according to dsc_clock_en */
			dc_dsc_policy_set_enable_dsc_when_not_needed(
				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  0,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
			/* Overwrite the stream flag if DSC is enabled through debugfs */
			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
				stream->timing.flags.DSC = 1;

			/* Apply slice-count/bpp overrides from dsc_settings. */
			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
		}
#endif
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);

	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports VSC SDP colorimetry
		 * before building the VSC info packet.
		 */
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
	}
finish:
	dc_sink_release(sink);

	return stream;
}
5320
/*
 * drm_crtc_funcs.destroy hook: release the DRM core bookkeeping for the
 * CRTC, then free the allocation backing it.
 */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
        drm_crtc_cleanup(crtc);
        kfree(crtc);
}
5326
5327 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5328                                   struct drm_crtc_state *state)
5329 {
5330         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5331
5332         /* TODO Destroy dc_stream objects are stream object is flattened */
5333         if (cur->stream)
5334                 dc_stream_release(cur->stream);
5335
5336
5337         __drm_atomic_helper_crtc_destroy_state(state);
5338
5339
5340         kfree(state);
5341 }
5342
/*
 * drm_crtc_funcs.reset hook: destroy any existing CRTC state and install a
 * freshly-allocated, zeroed dm_crtc_state as the current state.
 */
static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
        struct dm_crtc_state *state;

        if (crtc->state)
                dm_crtc_destroy_state(crtc, crtc->state);

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        /* On allocation failure, warn and bail without installing a state. */
        if (WARN_ON(!state))
                return;

        __drm_atomic_helper_crtc_reset(crtc, &state->base);
}
5356
5357 static struct drm_crtc_state *
5358 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5359 {
5360         struct dm_crtc_state *state, *cur;
5361
5362         cur = to_dm_crtc_state(crtc->state);
5363
5364         if (WARN_ON(!crtc->state))
5365                 return NULL;
5366
5367         state = kzalloc(sizeof(*state), GFP_KERNEL);
5368         if (!state)
5369                 return NULL;
5370
5371         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5372
5373         if (cur->stream) {
5374                 state->stream = cur->stream;
5375                 dc_stream_retain(state->stream);
5376         }
5377
5378         state->active_planes = cur->active_planes;
5379         state->vrr_infopacket = cur->vrr_infopacket;
5380         state->abm_level = cur->abm_level;
5381         state->vrr_supported = cur->vrr_supported;
5382         state->freesync_config = cur->freesync_config;
5383         state->crc_src = cur->crc_src;
5384         state->cm_has_degamma = cur->cm_has_degamma;
5385         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5386 #ifdef CONFIG_DEBUG_FS
5387         state->crc_window = cur->crc_window;
5388 #endif
5389         /* TODO Duplicate dc_stream after objects are stream object is flattened */
5390
5391         return &state->base;
5392 }
5393
5394 #ifdef CONFIG_DEBUG_FS
5395 static int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
5396                                             struct drm_crtc_state *crtc_state,
5397                                             struct drm_property *property,
5398                                             uint64_t val)
5399 {
5400         struct drm_device *dev = crtc->dev;
5401         struct amdgpu_device *adev = drm_to_adev(dev);
5402         struct dm_crtc_state *dm_new_state =
5403                 to_dm_crtc_state(crtc_state);
5404
5405         if (property == adev->dm.crc_win_x_start_property)
5406                 dm_new_state->crc_window.x_start = val;
5407         else if (property == adev->dm.crc_win_y_start_property)
5408                 dm_new_state->crc_window.y_start = val;
5409         else if (property == adev->dm.crc_win_x_end_property)
5410                 dm_new_state->crc_window.x_end = val;
5411         else if (property == adev->dm.crc_win_y_end_property)
5412                 dm_new_state->crc_window.y_end = val;
5413         else
5414                 return -EINVAL;
5415
5416         return 0;
5417 }
5418
5419 static int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
5420                                             const struct drm_crtc_state *state,
5421                                             struct drm_property *property,
5422                                             uint64_t *val)
5423 {
5424         struct drm_device *dev = crtc->dev;
5425         struct amdgpu_device *adev = drm_to_adev(dev);
5426         struct dm_crtc_state *dm_state =
5427                 to_dm_crtc_state(state);
5428
5429         if (property == adev->dm.crc_win_x_start_property)
5430                 *val = dm_state->crc_window.x_start;
5431         else if (property == adev->dm.crc_win_y_start_property)
5432                 *val = dm_state->crc_window.y_start;
5433         else if (property == adev->dm.crc_win_x_end_property)
5434                 *val = dm_state->crc_window.x_end;
5435         else if (property == adev->dm.crc_win_y_end_property)
5436                 *val = dm_state->crc_window.y_end;
5437         else
5438                 return -EINVAL;
5439
5440         return 0;
5441 }
5442 #endif
5443
5444 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5445 {
5446         enum dc_irq_source irq_source;
5447         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5448         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5449         int rc;
5450
5451         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5452
5453         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5454
5455         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5456                          acrtc->crtc_id, enable ? "en" : "dis", rc);
5457         return rc;
5458 }
5459
/*
 * Enable/disable the VBLANK interrupt for a CRTC, keeping the VUPDATE
 * interrupt and the DM-wide active-VBLANK counter in sync.
 *
 * On enable, VUPDATE is only switched on when the CRTC is in VRR mode;
 * on disable, VUPDATE is always switched off. active_vblank_irq_count is
 * updated under dm->dc_lock and (on DCN builds) gates DC idle
 * optimizations: they are only allowed once the count drops to zero.
 *
 * Returns 0 on success, or a negative error (-EBUSY) if an interrupt
 * source could not be (un)set.
 */
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
        enum dc_irq_source irq_source;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
        struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
        struct amdgpu_display_manager *dm = &adev->dm;
        int rc = 0;

        if (enable) {
                /* vblank irq on -> Only need vupdate irq in vrr mode */
                if (amdgpu_dm_vrr_active(acrtc_state))
                        rc = dm_set_vupdate_irq(crtc, true);
        } else {
                /* vblank irq off -> vupdate irq off */
                rc = dm_set_vupdate_irq(crtc, false);
        }

        if (rc)
                return rc;

        irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;

        if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
                return -EBUSY;

        /* dc_lock protects the shared VBLANK counter across CRTCs. */
        mutex_lock(&dm->dc_lock);

        if (enable)
                dm->active_vblank_irq_count++;
        else
                dm->active_vblank_irq_count--;

#if defined(CONFIG_DRM_AMD_DC_DCN)
        /* MALL/idle optimizations are only safe with all VBLANK irqs off. */
        dc_allow_idle_optimizations(
                adev->dm.dc, dm->active_vblank_irq_count == 0 ? true : false);

        DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
#endif

        mutex_unlock(&dm->dc_lock);

        return 0;
}
5504
/* drm_crtc_funcs.enable_vblank hook: turn the CRTC's VBLANK interrupt on. */
static int dm_enable_vblank(struct drm_crtc *crtc)
{
        return dm_set_vblank(crtc, true);
}
5509
/*
 * drm_crtc_funcs.disable_vblank hook: turn the CRTC's VBLANK interrupt off.
 * The hook returns void, so any dm_set_vblank() error is ignored here.
 */
static void dm_disable_vblank(struct drm_crtc *crtc)
{
        dm_set_vblank(crtc, false);
}
5514
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
        .reset = dm_crtc_reset_state,
        .destroy = amdgpu_dm_crtc_destroy,
        .gamma_set = drm_atomic_helper_legacy_gamma_set,
        .set_config = drm_atomic_helper_set_config,
        .page_flip = drm_atomic_helper_page_flip,
        .atomic_duplicate_state = dm_crtc_duplicate_state,
        .atomic_destroy_state = dm_crtc_destroy_state,
        .set_crc_source = amdgpu_dm_crtc_set_crc_source,
        .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
        .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
        .get_vblank_counter = amdgpu_get_vblank_counter_kms,
        .enable_vblank = dm_enable_vblank,
        .disable_vblank = dm_disable_vblank,
        .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#ifdef CONFIG_DEBUG_FS
        /* CRC window properties are only exposed for debugfs builds. */
        .atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
        .atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
#endif
};
5536
5537 static enum drm_connector_status
5538 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5539 {
5540         bool connected;
5541         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5542
5543         /*
5544          * Notes:
5545          * 1. This interface is NOT called in context of HPD irq.
5546          * 2. This interface *is called* in context of user-mode ioctl. Which
5547          * makes it a bad place for *any* MST-related activity.
5548          */
5549
5550         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5551             !aconnector->fake_enable)
5552                 connected = (aconnector->dc_sink != NULL);
5553         else
5554                 connected = (aconnector->base.force == DRM_FORCE_ON);
5555
5556         update_subconnector_property(aconnector);
5557
5558         return (connected ? connector_status_connected :
5559                         connector_status_disconnected);
5560 }
5561
/*
 * drm_connector_funcs.atomic_set_property hook.
 *
 * Stores the DM-private connector properties (scaling mode, underscan
 * enable/borders, ABM level) into the new dm_connector_state.
 *
 * Returns 0 if the property was handled (including a no-op scaling change),
 * -EINVAL for unrecognized properties.
 */
int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
                                            struct drm_connector_state *connector_state,
                                            struct drm_property *property,
                                            uint64_t val)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_connector_state *dm_old_state =
                to_dm_connector_state(connector->state);
        struct dm_connector_state *dm_new_state =
                to_dm_connector_state(connector_state);

        int ret = -EINVAL;

        if (property == dev->mode_config.scaling_mode_property) {
                enum amdgpu_rmx_type rmx_type;

                /* Map the DRM scaling mode onto the amdgpu RMX type. */
                switch (val) {
                case DRM_MODE_SCALE_CENTER:
                        rmx_type = RMX_CENTER;
                        break;
                case DRM_MODE_SCALE_ASPECT:
                        rmx_type = RMX_ASPECT;
                        break;
                case DRM_MODE_SCALE_FULLSCREEN:
                        rmx_type = RMX_FULL;
                        break;
                case DRM_MODE_SCALE_NONE:
                default:
                        rmx_type = RMX_OFF;
                        break;
                }

                /* Unchanged scaling: succeed without touching the new state. */
                if (dm_old_state->scaling == rmx_type)
                        return 0;

                dm_new_state->scaling = rmx_type;
                ret = 0;
        } else if (property == adev->mode_info.underscan_hborder_property) {
                dm_new_state->underscan_hborder = val;
                ret = 0;
        } else if (property == adev->mode_info.underscan_vborder_property) {
                dm_new_state->underscan_vborder = val;
                ret = 0;
        } else if (property == adev->mode_info.underscan_property) {
                dm_new_state->underscan_enable = val;
                ret = 0;
        } else if (property == adev->mode_info.abm_level_property) {
                dm_new_state->abm_level = val;
                ret = 0;
        }

        return ret;
}
5616
/*
 * drm_connector_funcs.atomic_get_property hook.
 *
 * Reads a DM-private connector property (scaling mode, underscan
 * enable/borders, ABM level) back out of the dm_connector_state.
 *
 * Returns 0 if the property was handled, -EINVAL otherwise.
 */
int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
                                            const struct drm_connector_state *state,
                                            struct drm_property *property,
                                            uint64_t *val)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_connector_state *dm_state =
                to_dm_connector_state(state);
        int ret = -EINVAL;

        if (property == dev->mode_config.scaling_mode_property) {
                /* Map the amdgpu RMX type back to the DRM scaling mode. */
                switch (dm_state->scaling) {
                case RMX_CENTER:
                        *val = DRM_MODE_SCALE_CENTER;
                        break;
                case RMX_ASPECT:
                        *val = DRM_MODE_SCALE_ASPECT;
                        break;
                case RMX_FULL:
                        *val = DRM_MODE_SCALE_FULLSCREEN;
                        break;
                case RMX_OFF:
                default:
                        *val = DRM_MODE_SCALE_NONE;
                        break;
                }
                ret = 0;
        } else if (property == adev->mode_info.underscan_hborder_property) {
                *val = dm_state->underscan_hborder;
                ret = 0;
        } else if (property == adev->mode_info.underscan_vborder_property) {
                *val = dm_state->underscan_vborder;
                ret = 0;
        } else if (property == adev->mode_info.underscan_property) {
                *val = dm_state->underscan_enable;
                ret = 0;
        } else if (property == adev->mode_info.abm_level_property) {
                *val = dm_state->abm_level;
                ret = 0;
        }

        return ret;
}
5661
/* drm_connector_funcs.early_unregister hook: unregister the DP AUX device. */
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

        drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
5668
/*
 * drm_connector_funcs.destroy hook.
 *
 * Tears down everything the connector owns, in order: MST topology manager
 * (if it was set up), the eDP/LVDS backlight device, the emulated and real
 * dc_sinks, CEC/aux registration, DRM core state, the i2c adapter, the aux
 * name string, and finally the connector allocation itself.
 */
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        const struct dc_link *link = aconnector->dc_link;
        struct amdgpu_device *adev = drm_to_adev(connector->dev);
        struct amdgpu_display_manager *dm = &adev->dm;

        /*
         * Call only if mst_mgr was initialized before since it's not done
         * for all connector types.
         */
        if (aconnector->mst_mgr.dev)
                drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

        /* Only eDP/LVDS panels with an active link register a backlight. */
        if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
            link->type != dc_connection_none &&
            dm->backlight_dev) {
                backlight_device_unregister(dm->backlight_dev);
                dm->backlight_dev = NULL;
        }
#endif

        if (aconnector->dc_em_sink)
                dc_sink_release(aconnector->dc_em_sink);
        aconnector->dc_em_sink = NULL;
        if (aconnector->dc_sink)
                dc_sink_release(aconnector->dc_sink);
        aconnector->dc_sink = NULL;

        drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
        if (aconnector->i2c) {
                i2c_del_adapter(&aconnector->i2c->base);
                kfree(aconnector->i2c);
        }
        kfree(aconnector->dm_dp_aux.aux.name);

        kfree(connector);
}
5712
/*
 * drm_connector_funcs.reset hook.
 *
 * Frees the current connector state and installs a freshly-allocated one
 * with DM defaults: scaling off, underscan disabled, 8 bpc max requested,
 * zero VCPI/PBN, and (eDP only) the module-configured ABM level.
 */
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
        struct dm_connector_state *state =
                to_dm_connector_state(connector->state);

        if (connector->state)
                __drm_atomic_helper_connector_destroy_state(connector->state);

        /*
         * NOTE(review): when connector->state is NULL, `state` is
         * to_dm_connector_state(NULL); this kfree() is only safe if that
         * yields NULL (i.e. `base` is the first member) — confirm.
         */
        kfree(state);

        state = kzalloc(sizeof(*state), GFP_KERNEL);

        if (state) {
                state->scaling = RMX_OFF;
                state->underscan_enable = false;
                state->underscan_hborder = 0;
                state->underscan_vborder = 0;
                state->base.max_requested_bpc = 8;
                state->vcpi_slots = 0;
                state->pbn = 0;
                if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
                        state->abm_level = amdgpu_dm_abm_level;

                __drm_atomic_helper_connector_reset(connector, &state->base);
        }
}
5739
/*
 * drm_connector_funcs.atomic_duplicate_state hook.
 *
 * Duplicates the current dm_connector_state with kmemdup, lets the DRM
 * helper (re)initialize the embedded base state, then explicitly copies the
 * DM-private fields so they are guaranteed to match the source state.
 *
 * Returns the new base state, or NULL on allocation failure.
 */
struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
        struct dm_connector_state *state =
                to_dm_connector_state(connector->state);

        struct dm_connector_state *new_state =
                        kmemdup(state, sizeof(*state), GFP_KERNEL);

        if (!new_state)
                return NULL;

        __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

        new_state->freesync_capable = state->freesync_capable;
        new_state->abm_level = state->abm_level;
        new_state->scaling = state->scaling;
        new_state->underscan_enable = state->underscan_enable;
        new_state->underscan_hborder = state->underscan_hborder;
        new_state->underscan_vborder = state->underscan_vborder;
        new_state->vcpi_slots = state->vcpi_slots;
        new_state->pbn = state->pbn;
        return &new_state->base;
}
5764
5765 static int
5766 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5767 {
5768         struct amdgpu_dm_connector *amdgpu_dm_connector =
5769                 to_amdgpu_dm_connector(connector);
5770         int r;
5771
5772         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5773             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5774                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5775                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5776                 if (r)
5777                         return r;
5778         }
5779
5780 #if defined(CONFIG_DEBUG_FS)
5781         connector_debugfs_init(amdgpu_dm_connector);
5782 #endif
5783
5784         return 0;
5785 }
5786
/* DRM connector function table wiring the DM connector hooks above. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
        .reset = amdgpu_dm_connector_funcs_reset,
        .detect = amdgpu_dm_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = amdgpu_dm_connector_destroy,
        .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
        .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
        .late_register = amdgpu_dm_connector_late_register,
        .early_unregister = amdgpu_dm_connector_unregister
};
5799
/* drm_connector_helper_funcs.get_modes: thin wrapper over the DM probe. */
static int get_modes(struct drm_connector *connector)
{
        return amdgpu_dm_connector_get_modes(connector);
}
5804
/*
 * Create an emulated (virtual-signal) remote dc_sink from the connector's
 * EDID blob, for forced/headless connectors.
 *
 * If no EDID blob is attached, the connector is forced OFF and no sink is
 * created. When the connector is forced ON, dc_sink is pointed at the
 * link's local sink if one exists, otherwise at the emulated sink, and a
 * reference is taken.
 */
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
        struct dc_sink_init_data init_params = {
                        .link = aconnector->dc_link,
                        .sink_signal = SIGNAL_TYPE_VIRTUAL
        };
        struct edid *edid;

        if (!aconnector->base.edid_blob_ptr) {
                DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
                                aconnector->base.name);

                aconnector->base.force = DRM_FORCE_OFF;
                aconnector->base.override_edid = false;
                return;
        }

        edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

        aconnector->edid = edid;

        /* Size covers the base EDID block plus all extension blocks. */
        aconnector->dc_em_sink = dc_link_add_remote_sink(
                aconnector->dc_link,
                (uint8_t *)edid,
                (edid->extensions + 1) * EDID_LENGTH,
                &init_params);

        if (aconnector->base.force == DRM_FORCE_ON) {
                aconnector->dc_sink = aconnector->dc_link->local_sink ?
                aconnector->dc_link->local_sink :
                aconnector->dc_em_sink;
                dc_sink_retain(aconnector->dc_sink);
        }
}
5839
/*
 * Set up EDID management for a forced connector: seed a non-zero verified
 * link cap for DP (so an initial modeset can happen on headless boot), mark
 * the EDID as overridden, and create the emulated sink from the EDID blob.
 */
static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = (struct dc_link *)aconnector->dc_link;

        /*
         * In case of headless boot with force on for DP managed connector
         * Those settings have to be != 0 to get initial modeset
         */
        if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
                link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
                link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
        }


        aconnector->base.override_edid = true;
        create_eml_sink(aconnector);
}
5857
/*
 * Create a dc_stream_state for the given connector/mode and validate it
 * against DC. On validation failure, retry with progressively lower color
 * depth (requested_bpc drops by 2 per attempt, not below 6).
 *
 * Returns the validated stream — callers in this file release it with
 * dc_stream_release() when done — or NULL if stream creation failed or no
 * bpc value passed DC validation.
 */
static struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                                const struct drm_display_mode *drm_mode,
                                const struct dm_connector_state *dm_state,
                                const struct dc_stream_state *old_stream)
{
        struct drm_connector *connector = &aconnector->base;
        struct amdgpu_device *adev = drm_to_adev(connector->dev);
        struct dc_stream_state *stream;
        const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
        /* Without a connector state, start from the 8 bpc default. */
        int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
        enum dc_status dc_result = DC_OK;

        do {
                stream = create_stream_for_sink(aconnector, drm_mode,
                                                dm_state, old_stream,
                                                requested_bpc);
                if (stream == NULL) {
                        DRM_ERROR("Failed to create stream for sink!\n");
                        break;
                }

                dc_result = dc_validate_stream(adev->dm.dc, stream);

                if (dc_result != DC_OK) {
                        DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
                                      drm_mode->hdisplay,
                                      drm_mode->vdisplay,
                                      drm_mode->clock,
                                      dc_result,
                                      dc_status_to_str(dc_result));

                        dc_stream_release(stream);
                        stream = NULL;
                        requested_bpc -= 2; /* lower bpc to retry validation */
                }

        } while (stream == NULL && requested_bpc >= 6);

        return stream;
}
5899
/*
 * drm_connector_helper_funcs.mode_valid hook.
 *
 * Rejects interlaced and doublescan modes outright, then attempts to build
 * and DC-validate a stream for the mode; MODE_OK only when validation
 * succeeds, MODE_ERROR otherwise.
 */
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
                                   struct drm_display_mode *mode)
{
        int result = MODE_ERROR;
        struct dc_sink *dc_sink;
        /* TODO: Unhardcode stream count */
        struct dc_stream_state *stream;
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

        if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
                        (mode->flags & DRM_MODE_FLAG_DBLSCAN))
                return result;

        /*
         * Only run this the first time mode_valid is called to initialize
         * EDID mgmt
         */
        if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
                !aconnector->dc_em_sink)
                handle_edid_mgmt(aconnector);

        dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

        /* A sink is required unless the connector is user-forced on. */
        if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
                                aconnector->base.force != DRM_FORCE_ON) {
                DRM_ERROR("dc_sink is NULL!\n");
                goto fail;
        }

        stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
        if (stream) {
                dc_stream_release(stream);
                result = MODE_OK;
        }

fail:
        /* TODO: error handling*/
        return result;
}
5939
/*
 * Pack the connector state's HDR output metadata into a DC info packet.
 *
 * Builds the HDMI Dynamic Range and Mastering (DRM) infoframe from the
 * state, packs it into a 30-byte buffer (4-byte header + 26 bytes of static
 * metadata), and rewraps it with the header layout DC expects for the
 * connector type (native HDMI infoframe vs DP/eDP SDP).
 *
 * Returns 0 on success (out->valid is only set when metadata was present;
 * with no metadata, *out stays zeroed and 0 is returned), a negative error
 * from infoframe construction/packing, or -EINVAL for an unexpected packed
 * length or connector type.
 */
static int fill_hdr_info_packet(const struct drm_connector_state *state,
                                struct dc_info_packet *out)
{
        struct hdmi_drm_infoframe frame;
        unsigned char buf[30]; /* 26 + 4 */
        ssize_t len;
        int ret, i;

        memset(out, 0, sizeof(*out));

        if (!state->hdr_output_metadata)
                return 0;

        ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
        if (ret)
                return ret;

        len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
        if (len < 0)
                return (int)len;

        /* Static metadata is a fixed 26 bytes + 4 byte header. */
        if (len != 30)
                return -EINVAL;

        /* Prepare the infopacket for DC. */
        switch (state->connector->connector_type) {
        case DRM_MODE_CONNECTOR_HDMIA:
                out->hb0 = 0x87; /* type */
                out->hb1 = 0x01; /* version */
                out->hb2 = 0x1A; /* length */
                out->sb[0] = buf[3]; /* checksum */
                i = 1;
                break;

        case DRM_MODE_CONNECTOR_DisplayPort:
        case DRM_MODE_CONNECTOR_eDP:
                out->hb0 = 0x00; /* sdp id, zero */
                out->hb1 = 0x87; /* type */
                out->hb2 = 0x1D; /* payload len - 1 */
                out->hb3 = (0x13 << 2); /* sdp version */
                out->sb[0] = 0x01; /* version */
                out->sb[1] = 0x1A; /* length */
                i = 2;
                break;

        default:
                return -EINVAL;
        }

        /* Copy the 26 bytes of static metadata after the per-type prefix. */
        memcpy(&out->sb[i], &buf[4], 26);
        out->valid = true;

        print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
                       sizeof(out->sb), false);

        return 0;
}
5998
5999 static bool
6000 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6001                           const struct drm_connector_state *new_state)
6002 {
6003         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6004         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6005
6006         if (old_blob != new_blob) {
6007                 if (old_blob && new_blob &&
6008                     old_blob->length == new_blob->length)
6009                         return memcmp(old_blob->data, new_blob->data,
6010                                       old_blob->length);
6011
6012                 return true;
6013         }
6014
6015         return false;
6016 }
6017
/*
 * drm_connector_helper_funcs.atomic_check hook.
 *
 * When the connector's HDR metadata blob changed, rebuild the HDR
 * infopacket (validating the new metadata) and force a modeset only when
 * entering or exiting HDR — see the inline comment for the rationale.
 *
 * Returns 0 on success or a negative error from infopacket filling /
 * CRTC state lookup.
 */
static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
                                 struct drm_atomic_state *state)
{
        struct drm_connector_state *new_con_state =
                drm_atomic_get_new_connector_state(state, conn);
        struct drm_connector_state *old_con_state =
                drm_atomic_get_old_connector_state(state, conn);
        struct drm_crtc *crtc = new_con_state->crtc;
        struct drm_crtc_state *new_crtc_state;
        int ret;

        trace_amdgpu_dm_connector_atomic_check(new_con_state);

        /* Nothing to validate for a connector with no CRTC assigned. */
        if (!crtc)
                return 0;

        if (is_hdr_metadata_different(old_con_state, new_con_state)) {
                struct dc_info_packet hdr_infopacket;

                ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
                if (ret)
                        return ret;

                new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
                if (IS_ERR(new_crtc_state))
                        return PTR_ERR(new_crtc_state);

                /*
                 * DC considers the stream backends changed if the
                 * static metadata changes. Forcing the modeset also
                 * gives a simple way for userspace to switch from
                 * 8bpc to 10bpc when setting the metadata to enter
                 * or exit HDR.
                 *
                 * Changing the static metadata after it's been
                 * set is permissible, however. So only force a
                 * modeset if we're entering or exiting HDR.
                 */
                new_crtc_state->mode_changed =
                        !old_con_state->hdr_output_metadata ||
                        !new_con_state->hdr_output_metadata;
        }

        return 0;
}
6064
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
        /*
         * If hotplugging a second, bigger display in FB console mode, the
         * bigger resolution modes will be filtered by drm_mode_validate_size()
         * and are missing after the user starts lightdm. So we need to renew
         * the modes list in the get_modes callback, not just return the modes
         * count.
         */
        .get_modes = get_modes,
        .mode_valid = amdgpu_dm_connector_mode_valid,
        .atomic_check = amdgpu_dm_connector_atomic_check,
};
6077
/*
 * drm_crtc_helper_funcs.disable stub — intentionally empty; CRTC disable
 * is presumably handled elsewhere in the atomic commit path (TODO confirm).
 */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
6081
6082 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6083 {
6084         struct drm_atomic_state *state = new_crtc_state->state;
6085         struct drm_plane *plane;
6086         int num_active = 0;
6087
6088         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6089                 struct drm_plane_state *new_plane_state;
6090
6091                 /* Cursor planes are "fake". */
6092                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6093                         continue;
6094
6095                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6096
6097                 if (!new_plane_state) {
6098                         /*
6099                          * The plane is enable on the CRTC and hasn't changed
6100                          * state. This means that it previously passed
6101                          * validation and is therefore enabled.
6102                          */
6103                         num_active += 1;
6104                         continue;
6105                 }
6106
6107                 /* We need a framebuffer to be considered enabled. */
6108                 num_active += (new_plane_state->fb != NULL);
6109         }
6110
6111         return num_active;
6112 }
6113
6114 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6115                                          struct drm_crtc_state *new_crtc_state)
6116 {
6117         struct dm_crtc_state *dm_new_crtc_state =
6118                 to_dm_crtc_state(new_crtc_state);
6119
6120         dm_new_crtc_state->active_planes = 0;
6121
6122         if (!dm_new_crtc_state->stream)
6123                 return;
6124
6125         dm_new_crtc_state->active_planes =
6126                 count_crtc_active_planes(new_crtc_state);
6127 }
6128
/*
 * CRTC helper .atomic_check: validate the proposed CRTC state.
 *
 * Returns 0 when the state is acceptable, -EINVAL when the configuration
 * cannot be programmed (CRTC enabled without its primary plane, or DC
 * stream validation failure).
 */
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	int ret = -EINVAL;

	trace_amdgpu_dm_crtc_atomic_check(crtc_state);

	/* Refresh the cached count of active non-cursor planes. */
	dm_update_crtc_active_planes(crtc, crtc_state);

	/* A required modeset without a stream to program is a logic error. */
	if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/*
	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
	 * planes are disabled, which is not supported by the hardware. And there is legacy
	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
	 */
	if (crtc_state->enable &&
	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
		return -EINVAL;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
	return ret;
}
6171
6172 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6173                                       const struct drm_display_mode *mode,
6174                                       struct drm_display_mode *adjusted_mode)
6175 {
6176         return true;
6177 }
6178
/* CRTC helper vtable for DM-managed CRTCs. */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
6185
/* Intentional no-op for the encoder helper .disable hook. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}
6190
6191 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6192 {
6193         switch (display_color_depth) {
6194                 case COLOR_DEPTH_666:
6195                         return 6;
6196                 case COLOR_DEPTH_888:
6197                         return 8;
6198                 case COLOR_DEPTH_101010:
6199                         return 10;
6200                 case COLOR_DEPTH_121212:
6201                         return 12;
6202                 case COLOR_DEPTH_141414:
6203                         return 14;
6204                 case COLOR_DEPTH_161616:
6205                         return 16;
6206                 default:
6207                         break;
6208                 }
6209         return 0;
6210 }
6211
/*
 * Encoder helper .atomic_check for DP MST connectors: compute the stream's
 * bandwidth requirement (PBN) and atomically reserve VCPI time slots on the
 * MST topology.  Connectors without an MST port or sink are accepted as-is.
 *
 * Returns 0 on success or the negative errno from VCPI slot allocation.
 */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	/* Nothing to do for non-MST connectors or when no sink is attached. */
	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	/* Bandwidth only changes with a connector or mode change. */
	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	/*
	 * Recompute PBN only for genuinely new states; duplicated states
	 * keep the previously computed value.
	 */
	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;
		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
				aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		/* 3 components per pixel; DSC is not applied here (false). */
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   dm_mst_get_pbn_divider(aconnector->dc_link));
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}
6258
/* Encoder helper vtable; referenced from encoder setup elsewhere in DM. */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
6263
6264 #if defined(CONFIG_DRM_AMD_DC_DCN)
6265 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6266                                             struct dc_state *dc_state)
6267 {
6268         struct dc_stream_state *stream = NULL;
6269         struct drm_connector *connector;
6270         struct drm_connector_state *new_con_state, *old_con_state;
6271         struct amdgpu_dm_connector *aconnector;
6272         struct dm_connector_state *dm_conn_state;
6273         int i, j, clock, bpp;
6274         int vcpi, pbn_div, pbn = 0;
6275
6276         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6277
6278                 aconnector = to_amdgpu_dm_connector(connector);
6279
6280                 if (!aconnector->port)
6281                         continue;
6282
6283                 if (!new_con_state || !new_con_state->crtc)
6284                         continue;
6285
6286                 dm_conn_state = to_dm_connector_state(new_con_state);
6287
6288                 for (j = 0; j < dc_state->stream_count; j++) {
6289                         stream = dc_state->streams[j];
6290                         if (!stream)
6291                                 continue;
6292
6293                         if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6294                                 break;
6295
6296                         stream = NULL;
6297                 }
6298
6299                 if (!stream)
6300                         continue;
6301
6302                 if (stream->timing.flags.DSC != 1) {
6303                         drm_dp_mst_atomic_enable_dsc(state,
6304                                                      aconnector->port,
6305                                                      dm_conn_state->pbn,
6306                                                      0,
6307                                                      false);
6308                         continue;
6309                 }
6310
6311                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6312                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6313                 clock = stream->timing.pix_clk_100hz / 10;
6314                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6315                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6316                                                     aconnector->port,
6317                                                     pbn, pbn_div,
6318                                                     true);
6319                 if (vcpi < 0)
6320                         return vcpi;
6321
6322                 dm_conn_state->pbn = pbn;
6323                 dm_conn_state->vcpi_slots = vcpi;
6324         }
6325         return 0;
6326 }
6327 #endif
6328
6329 static void dm_drm_plane_reset(struct drm_plane *plane)
6330 {
6331         struct dm_plane_state *amdgpu_state = NULL;
6332
6333         if (plane->state)
6334                 plane->funcs->atomic_destroy_state(plane, plane->state);
6335
6336         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6337         WARN_ON(amdgpu_state == NULL);
6338
6339         if (amdgpu_state)
6340                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6341 }
6342
6343 static struct drm_plane_state *
6344 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6345 {
6346         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6347
6348         old_dm_plane_state = to_dm_plane_state(plane->state);
6349         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6350         if (!dm_plane_state)
6351                 return NULL;
6352
6353         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6354
6355         if (old_dm_plane_state->dc_state) {
6356                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6357                 dc_plane_state_retain(dm_plane_state->dc_state);
6358         }
6359
6360         return &dm_plane_state->base;
6361 }
6362
6363 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6364                                 struct drm_plane_state *state)
6365 {
6366         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6367
6368         if (dm_plane_state->dc_state)
6369                 dc_plane_state_release(dm_plane_state->dc_state);
6370
6371         drm_atomic_helper_plane_destroy_state(plane, state);
6372 }
6373
/* Core plane vtable; update/disable go through the atomic helpers. */
static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
	.format_mod_supported = dm_plane_format_mod_supported,
};
6383
/*
 * Plane helper .prepare_fb: pin the framebuffer's BO into a scanout-capable
 * domain, bind it to GART, and record its GPU address ahead of the commit.
 *
 * On success the BO carries an extra pin and reference that
 * dm_plane_helper_cleanup_fb() releases.  Returns 0 or a negative errno.
 */
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint32_t domain;
	int r;

	/* Plane being disabled: no framebuffer, nothing to pin. */
	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	/* Reserve the BO before pinning and GART binding. */
	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	/* Cursors must live in VRAM; other planes may use any scanout domain. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		/* -ERESTARTSYS is expected on signal; don't spam the log. */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	/* Extra reference; dropped in dm_plane_helper_cleanup_fb(). */
	amdgpu_bo_ref(rbo);

	/**
	 * We don't do surface updates on planes that have been newly created,
	 * but we also don't have the afb->address during atomic check.
	 *
	 * Fill in buffer attributes depending on the address here, but only on
	 * newly created planes since they're not being used by DC yet and this
	 * won't modify global state.
	 */
	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state =
			dm_plane_state_new->dc_state;
		bool force_disable_dcc = !plane_state->dcc.enable;

		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			afb->tiling_flags,
			&plane_state->tiling_info, &plane_state->plane_size,
			&plane_state->dcc, &plane_state->address,
			afb->tmz_surface, force_disable_dcc);
	}

	return 0;
}
6473
/*
 * Plane helper .cleanup_fb: undo prepare_fb — unpin the BO and drop the
 * reference taken there.  No-op when the old state had no framebuffer.
 */
static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
				       struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		/* Can't unpin safely without the reservation; bail out. */
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}
6494
6495 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6496                                        struct drm_crtc_state *new_crtc_state)
6497 {
6498         struct drm_framebuffer *fb = state->fb;
6499         int min_downscale, max_upscale;
6500         int min_scale = 0;
6501         int max_scale = INT_MAX;
6502
6503         /* Plane enabled? Get min/max allowed scaling factors from plane caps. */
6504         if (fb && state->crtc) {
6505                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6506                                              &min_downscale, &max_upscale);
6507                 /*
6508                  * Convert to drm convention: 16.16 fixed point, instead of dc's
6509                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6510                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
6511                  */
6512                 min_scale = (1000 << 16) / max_upscale;
6513                 max_scale = (1000 << 16) / min_downscale;
6514         }
6515
6516         return drm_atomic_helper_check_plane_state(
6517                 state, new_crtc_state, min_scale, max_scale, true, true);
6518 }
6519
/*
 * Plane helper .atomic_check: check scaling/position via the DRM helpers
 * and then let DC validate the resulting plane state.
 *
 * Returns 0 when valid, -EINVAL or a helper errno otherwise.
 */
static int dm_plane_atomic_check(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state;
	struct dc_scaling_info scaling_info;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	trace_amdgpu_dm_plane_atomic_check(state);

	dm_plane_state = to_dm_plane_state(state);

	/* Without a DC state there is nothing for DC to validate yet. */
	if (!dm_plane_state->dc_state)
		return 0;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state->state, state->crtc);
	if (!new_crtc_state)
		return -EINVAL;

	ret = dm_plane_helper_check_state(state, new_crtc_state);
	if (ret)
		return ret;

	/* Only checks that the scaling parameters are representable. */
	ret = fill_dc_scaling_info(state, &scaling_info);
	if (ret)
		return ret;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}
6555
6556 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6557                                        struct drm_plane_state *new_plane_state)
6558 {
6559         /* Only support async updates on cursor planes. */
6560         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6561                 return -EINVAL;
6562
6563         return 0;
6564 }
6565
/*
 * Plane helper .atomic_async_update: apply a cursor update immediately,
 * without waiting for vblank.  Swaps the fb into the current state, copies
 * the new src/crtc rectangles, then programs the cursor hardware.
 */
static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_plane_state *new_state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(new_state->state, plane);

	trace_amdgpu_dm_atomic_update_cursor(new_state);

	/* swap() keeps fb reference accounting balanced between states. */
	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}
6587
/* Plane helper vtable; async hooks are effective for cursor planes only. */
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};
6595
6596 /*
6597  * TODO: these are currently initialized to rgb formats only.
6598  * For future use cases we should either initialize them dynamically based on
6599  * plane capabilities, or initialize this array to all formats, so internal drm
6600  * check will succeed, and let DC implement proper check
6601  */
6602 static const uint32_t rgb_formats[] = {
6603         DRM_FORMAT_XRGB8888,
6604         DRM_FORMAT_ARGB8888,
6605         DRM_FORMAT_RGBA8888,
6606         DRM_FORMAT_XRGB2101010,
6607         DRM_FORMAT_XBGR2101010,
6608         DRM_FORMAT_ARGB2101010,
6609         DRM_FORMAT_ABGR2101010,
6610         DRM_FORMAT_XBGR8888,
6611         DRM_FORMAT_ABGR8888,
6612         DRM_FORMAT_RGB565,
6613 };
6614
/* Format list advertised on overlay planes. */
static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565
};
6623
/* Cursor planes accept a single format. */
static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};
6627
6628 static int get_plane_formats(const struct drm_plane *plane,
6629                              const struct dc_plane_cap *plane_cap,
6630                              uint32_t *formats, int max_formats)
6631 {
6632         int i, num_formats = 0;
6633
6634         /*
6635          * TODO: Query support for each group of formats directly from
6636          * DC plane caps. This will require adding more formats to the
6637          * caps list.
6638          */
6639
6640         switch (plane->type) {
6641         case DRM_PLANE_TYPE_PRIMARY:
6642                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6643                         if (num_formats >= max_formats)
6644                                 break;
6645
6646                         formats[num_formats++] = rgb_formats[i];
6647                 }
6648
6649                 if (plane_cap && plane_cap->pixel_format_support.nv12)
6650                         formats[num_formats++] = DRM_FORMAT_NV12;
6651                 if (plane_cap && plane_cap->pixel_format_support.p010)
6652                         formats[num_formats++] = DRM_FORMAT_P010;
6653                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6654                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6655                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6656                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6657                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6658                 }
6659                 break;
6660
6661         case DRM_PLANE_TYPE_OVERLAY:
6662                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6663                         if (num_formats >= max_formats)
6664                                 break;
6665
6666                         formats[num_formats++] = overlay_formats[i];
6667                 }
6668                 break;
6669
6670         case DRM_PLANE_TYPE_CURSOR:
6671                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6672                         if (num_formats >= max_formats)
6673                                 break;
6674
6675                         formats[num_formats++] = cursor_formats[i];
6676                 }
6677                 break;
6678         }
6679
6680         return num_formats;
6681 }
6682
/*
 * Initialize and register a DRM plane for the display manager.
 *
 * @dm:             display manager owning the plane
 * @plane:          zero-initialized plane to register
 * @possible_crtcs: mask of CRTCs the plane may be assigned to
 * @plane_cap:      DC capabilities for this plane; may be NULL
 *
 * Registers the plane with its supported formats/modifiers and attaches
 * blending, YUV color-space and rotation properties as the capabilities
 * permit.  Returns 0 on success or a negative errno.
 */
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;
	unsigned int supported_rotations;
	uint64_t *modifiers = NULL;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
	if (res)
		return res;

	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       modifiers, plane->type, NULL);
	/* Modifier list is no longer needed once the plane is registered. */
	kfree(modifiers);
	if (res)
		return res;

	/* Per-pixel alpha blending, only for capable overlay planes. */
	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	supported_rotations =
		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

	/* Rotation property is exposed only on ASICs >= Bonaire. */
	if (dm->adev->asic_type >= CHIP_BONAIRE &&
	    plane->type != DRM_PLANE_TYPE_CURSOR)
		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
						   supported_rotations);

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}
6749
6750 #ifdef CONFIG_DEBUG_FS
/*
 * Attach the CRC-window (x/y start/end) debug properties to a CRTC, each
 * with an initial value of 0.
 */
static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
				struct amdgpu_crtc *acrtc)
{
	drm_object_attach_property(&acrtc->base.base,
				   dm->crc_win_x_start_property,
				   0);
	drm_object_attach_property(&acrtc->base.base,
				   dm->crc_win_y_start_property,
				   0);
	drm_object_attach_property(&acrtc->base.base,
				   dm->crc_win_x_end_property,
				   0);
	drm_object_attach_property(&acrtc->base.base,
				   dm->crc_win_y_end_property,
				   0);
}
6767 #endif
6768
6769 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6770                                struct drm_plane *plane,
6771                                uint32_t crtc_index)
6772 {
6773         struct amdgpu_crtc *acrtc = NULL;
6774         struct drm_plane *cursor_plane;
6775
6776         int res = -ENOMEM;
6777
6778         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6779         if (!cursor_plane)
6780                 goto fail;
6781
6782         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6783         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6784
6785         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6786         if (!acrtc)
6787                 goto fail;
6788
6789         res = drm_crtc_init_with_planes(
6790                         dm->ddev,
6791                         &acrtc->base,
6792                         plane,
6793                         cursor_plane,
6794                         &amdgpu_dm_crtc_funcs, NULL);
6795
6796         if (res)
6797                 goto fail;
6798
6799         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6800
6801         /* Create (reset) the plane state */
6802         if (acrtc->base.funcs->reset)
6803                 acrtc->base.funcs->reset(&acrtc->base);
6804
6805         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6806         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6807
6808         acrtc->crtc_id = crtc_index;
6809         acrtc->base.enabled = false;
6810         acrtc->otg_inst = -1;
6811
6812         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6813         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6814                                    true, MAX_COLOR_LUT_ENTRIES);
6815         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6816 #ifdef CONFIG_DEBUG_FS
6817         attach_crtc_crc_properties(dm, acrtc);
6818 #endif
6819         return 0;
6820
6821 fail:
6822         kfree(acrtc);
6823         kfree(cursor_plane);
6824         return res;
6825 }
6826
6827
6828 static int to_drm_connector_type(enum signal_type st)
6829 {
6830         switch (st) {
6831         case SIGNAL_TYPE_HDMI_TYPE_A:
6832                 return DRM_MODE_CONNECTOR_HDMIA;
6833         case SIGNAL_TYPE_EDP:
6834                 return DRM_MODE_CONNECTOR_eDP;
6835         case SIGNAL_TYPE_LVDS:
6836                 return DRM_MODE_CONNECTOR_LVDS;
6837         case SIGNAL_TYPE_RGB:
6838                 return DRM_MODE_CONNECTOR_VGA;
6839         case SIGNAL_TYPE_DISPLAY_PORT:
6840         case SIGNAL_TYPE_DISPLAY_PORT_MST:
6841                 return DRM_MODE_CONNECTOR_DisplayPort;
6842         case SIGNAL_TYPE_DVI_DUAL_LINK:
6843         case SIGNAL_TYPE_DVI_SINGLE_LINK:
6844                 return DRM_MODE_CONNECTOR_DVID;
6845         case SIGNAL_TYPE_VIRTUAL:
6846                 return DRM_MODE_CONNECTOR_VIRTUAL;
6847
6848         default:
6849                 return DRM_MODE_CONNECTOR_Unknown;
6850         }
6851 }
6852
6853 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6854 {
6855         struct drm_encoder *encoder;
6856
6857         /* There is only one encoder per connector */
6858         drm_connector_for_each_possible_encoder(connector, encoder)
6859                 return encoder;
6860
6861         return NULL;
6862 }
6863
/*
 * Cache the connector's preferred probed mode as the encoder's native mode.
 * Invalidates the cached mode (clock = 0) when no encoder exists or no
 * preferred mode is found.
 */
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	/* Invalidate any previously cached native mode. */
	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			/*
			 * NOTE(review): the break below is unconditional, so
			 * only the FIRST probed mode is ever examined; later
			 * entries are skipped even if they carry
			 * DRM_MODE_TYPE_PREFERRED.  Confirm this is
			 * intentional (preferred modes usually sort first).
			 */
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}

	}
}
6892
6893 static struct drm_display_mode *
6894 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6895                              char *name,
6896                              int hdisplay, int vdisplay)
6897 {
6898         struct drm_device *dev = encoder->dev;
6899         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6900         struct drm_display_mode *mode = NULL;
6901         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6902
6903         mode = drm_mode_duplicate(dev, native_mode);
6904
6905         if (mode == NULL)
6906                 return NULL;
6907
6908         mode->hdisplay = hdisplay;
6909         mode->vdisplay = vdisplay;
6910         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6911         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6912
6913         return mode;
6914
6915 }
6916
6917 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6918                                                  struct drm_connector *connector)
6919 {
6920         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6921         struct drm_display_mode *mode = NULL;
6922         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6923         struct amdgpu_dm_connector *amdgpu_dm_connector =
6924                                 to_amdgpu_dm_connector(connector);
6925         int i;
6926         int n;
6927         struct mode_size {
6928                 char name[DRM_DISPLAY_MODE_LEN];
6929                 int w;
6930                 int h;
6931         } common_modes[] = {
6932                 {  "640x480",  640,  480},
6933                 {  "800x600",  800,  600},
6934                 { "1024x768", 1024,  768},
6935                 { "1280x720", 1280,  720},
6936                 { "1280x800", 1280,  800},
6937                 {"1280x1024", 1280, 1024},
6938                 { "1440x900", 1440,  900},
6939                 {"1680x1050", 1680, 1050},
6940                 {"1600x1200", 1600, 1200},
6941                 {"1920x1080", 1920, 1080},
6942                 {"1920x1200", 1920, 1200}
6943         };
6944
6945         n = ARRAY_SIZE(common_modes);
6946
6947         for (i = 0; i < n; i++) {
6948                 struct drm_display_mode *curmode = NULL;
6949                 bool mode_existed = false;
6950
6951                 if (common_modes[i].w > native_mode->hdisplay ||
6952                     common_modes[i].h > native_mode->vdisplay ||
6953                    (common_modes[i].w == native_mode->hdisplay &&
6954                     common_modes[i].h == native_mode->vdisplay))
6955                         continue;
6956
6957                 list_for_each_entry(curmode, &connector->probed_modes, head) {
6958                         if (common_modes[i].w == curmode->hdisplay &&
6959                             common_modes[i].h == curmode->vdisplay) {
6960                                 mode_existed = true;
6961                                 break;
6962                         }
6963                 }
6964
6965                 if (mode_existed)
6966                         continue;
6967
6968                 mode = amdgpu_dm_create_common_mode(encoder,
6969                                 common_modes[i].name, common_modes[i].w,
6970                                 common_modes[i].h);
6971                 drm_mode_probed_add(connector, mode);
6972                 amdgpu_dm_connector->num_modes++;
6973         }
6974 }
6975
6976 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6977                                               struct edid *edid)
6978 {
6979         struct amdgpu_dm_connector *amdgpu_dm_connector =
6980                         to_amdgpu_dm_connector(connector);
6981
6982         if (edid) {
6983                 /* empty probed_modes */
6984                 INIT_LIST_HEAD(&connector->probed_modes);
6985                 amdgpu_dm_connector->num_modes =
6986                                 drm_add_edid_modes(connector, edid);
6987
6988                 /* sorting the probed modes before calling function
6989                  * amdgpu_dm_get_native_mode() since EDID can have
6990                  * more than one preferred mode. The modes that are
6991                  * later in the probed mode list could be of higher
6992                  * and preferred resolution. For example, 3840x2160
6993                  * resolution in base EDID preferred timing and 4096x2160
6994                  * preferred resolution in DID extension block later.
6995                  */
6996                 drm_mode_sort(&connector->probed_modes);
6997                 amdgpu_dm_get_native_mode(connector);
6998         } else {
6999                 amdgpu_dm_connector->num_modes = 0;
7000         }
7001 }
7002
7003 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7004 {
7005         struct amdgpu_dm_connector *amdgpu_dm_connector =
7006                         to_amdgpu_dm_connector(connector);
7007         struct drm_encoder *encoder;
7008         struct edid *edid = amdgpu_dm_connector->edid;
7009
7010         encoder = amdgpu_dm_connector_to_encoder(connector);
7011
7012         if (!drm_edid_is_valid(edid)) {
7013                 amdgpu_dm_connector->num_modes =
7014                                 drm_add_modes_noedid(connector, 640, 480);
7015         } else {
7016                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7017                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7018         }
7019         amdgpu_dm_fbc_init(connector);
7020
7021         return amdgpu_dm_connector->num_modes;
7022 }
7023
/*
 * amdgpu_dm_connector_init_helper - common initialization for a DM connector.
 * @dm: display manager owning the connector
 * @aconnector: connector to initialize (base connector already created)
 * @connector_type: DRM_MODE_CONNECTOR_* value for this link
 * @link: DC link the connector represents
 * @link_index: index of @link, also used as the connector id
 *
 * Sets the connector defaults (no interlace/doublescan/stereo, DPMS off),
 * configures HPD polling per connector type, and attaches the DM-specific
 * DRM properties (scaling, underscan, max bpc, ABM, HDR metadata, VRR,
 * and optionally HDCP content protection).
 */
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot-plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		/* YCbCr 4:2:0 only if the link encoder supports it over HDMI. */
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	/* MST connectors get max_bpc from their port instead. */
	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	/* ABM (backlight modulation) needs either DMCU firmware or DMUB. */
	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* HDCP only when the workqueue was set up during init. */
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}
7114
7115 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7116                               struct i2c_msg *msgs, int num)
7117 {
7118         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7119         struct ddc_service *ddc_service = i2c->ddc_service;
7120         struct i2c_command cmd;
7121         int i;
7122         int result = -EIO;
7123
7124         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7125
7126         if (!cmd.payloads)
7127                 return result;
7128
7129         cmd.number_of_payloads = num;
7130         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7131         cmd.speed = 100;
7132
7133         for (i = 0; i < num; i++) {
7134                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7135                 cmd.payloads[i].address = msgs[i].addr;
7136                 cmd.payloads[i].length = msgs[i].len;
7137                 cmd.payloads[i].data = msgs[i].buf;
7138         }
7139
7140         if (dc_submit_i2c(
7141                         ddc_service->ctx->dc,
7142                         ddc_service->ddc_pin->hw_info.ddc_channel,
7143                         &cmd))
7144                 result = num;
7145
7146         kfree(cmd.payloads);
7147         return result;
7148 }
7149
/* Report plain I2C plus SMBus-emulation capability for the DM i2c bus. */
static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

/* i2c algorithm backed by DC's DDC engine (see amdgpu_dm_i2c_xfer()). */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
7159
/*
 * Allocate and initialize an i2c adapter wrapping @ddc_service's DDC pin.
 * The adapter is not yet registered with the i2c core; the caller does
 * that via i2c_add_adapter() and owns the allocation.
 *
 * Returns the new adapter or NULL on allocation failure.
 *
 * NOTE(review): @res is never written here; presumably a leftover from an
 * older error-reporting scheme — confirm before relying on it in callers.
 */
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	/* Record which DDC channel this adapter drives, for dc_submit_i2c(). */
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}
7182
7183
7184 /*
7185  * Note: this function assumes that dc_link_detect() was called for the
7186  * dc_link which will be represented by this aconnector.
7187  */
7188 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7189                                     struct amdgpu_dm_connector *aconnector,
7190                                     uint32_t link_index,
7191                                     struct amdgpu_encoder *aencoder)
7192 {
7193         int res = 0;
7194         int connector_type;
7195         struct dc *dc = dm->dc;
7196         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7197         struct amdgpu_i2c_adapter *i2c;
7198
7199         link->priv = aconnector;
7200
7201         DRM_DEBUG_DRIVER("%s()\n", __func__);
7202
7203         i2c = create_i2c(link->ddc, link->link_index, &res);
7204         if (!i2c) {
7205                 DRM_ERROR("Failed to create i2c adapter data\n");
7206                 return -ENOMEM;
7207         }
7208
7209         aconnector->i2c = i2c;
7210         res = i2c_add_adapter(&i2c->base);
7211
7212         if (res) {
7213                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7214                 goto out_free;
7215         }
7216
7217         connector_type = to_drm_connector_type(link->connector_signal);
7218
7219         res = drm_connector_init_with_ddc(
7220                         dm->ddev,
7221                         &aconnector->base,
7222                         &amdgpu_dm_connector_funcs,
7223                         connector_type,
7224                         &i2c->base);
7225
7226         if (res) {
7227                 DRM_ERROR("connector_init failed\n");
7228                 aconnector->connector_id = -1;
7229                 goto out_free;
7230         }
7231
7232         drm_connector_helper_add(
7233                         &aconnector->base,
7234                         &amdgpu_dm_connector_helper_funcs);
7235
7236         amdgpu_dm_connector_init_helper(
7237                 dm,
7238                 aconnector,
7239                 connector_type,
7240                 link,
7241                 link_index);
7242
7243         drm_connector_attach_encoder(
7244                 &aconnector->base, &aencoder->base);
7245
7246         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7247                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7248                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7249
7250 out_free:
7251         if (res) {
7252                 kfree(i2c);
7253                 aconnector->i2c = NULL;
7254         }
7255         return res;
7256 }
7257
7258 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7259 {
7260         switch (adev->mode_info.num_crtc) {
7261         case 1:
7262                 return 0x1;
7263         case 2:
7264                 return 0x3;
7265         case 3:
7266                 return 0x7;
7267         case 4:
7268                 return 0xf;
7269         case 5:
7270                 return 0x1f;
7271         case 6:
7272         default:
7273                 return 0x3f;
7274         }
7275 }
7276
7277 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7278                                   struct amdgpu_encoder *aencoder,
7279                                   uint32_t link_index)
7280 {
7281         struct amdgpu_device *adev = drm_to_adev(dev);
7282
7283         int res = drm_encoder_init(dev,
7284                                    &aencoder->base,
7285                                    &amdgpu_dm_encoder_funcs,
7286                                    DRM_MODE_ENCODER_TMDS,
7287                                    NULL);
7288
7289         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7290
7291         if (!res)
7292                 aencoder->encoder_id = link_index;
7293         else
7294                 aencoder->encoder_id = -1;
7295
7296         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7297
7298         return res;
7299 }
7300
/*
 * Enable or disable the vblank + pageflip interrupts for @acrtc.
 *
 * Ordering matters: on enable, vblank is turned on before taking the
 * pageflip IRQ reference; on disable, the reference is dropped before
 * vblank is turned off (the mirror of the enable path).
 */
static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
	} else {

		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}
7331
7332 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7333                                       struct amdgpu_crtc *acrtc)
7334 {
7335         int irq_type =
7336                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7337
7338         /**
7339          * This reads the current state for the IRQ and force reapplies
7340          * the setting to hardware.
7341          */
7342         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7343 }
7344
7345 static bool
7346 is_scaling_state_different(const struct dm_connector_state *dm_state,
7347                            const struct dm_connector_state *old_dm_state)
7348 {
7349         if (dm_state->scaling != old_dm_state->scaling)
7350                 return true;
7351         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7352                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7353                         return true;
7354         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7355                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7356                         return true;
7357         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7358                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7359                 return true;
7360         return false;
7361 }
7362
7363 #ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * Decide whether the HDCP content-protection state changed in a way that
 * requires (re)starting or stopping HDCP for this connector.
 *
 * Note this function mutates @state->content_protection in place to
 * normalize transitions (e.g. mapping a spurious ENABLED -> DESIRED back
 * to ENABLED) before comparing.  Returns true when HDCP work must run.
 */
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	/* Handle: Type0/1 change */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re enabled, ignore this
	 *
	 * Handles:	ENABLED -> DESIRED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
	 *
	 * Handles:	UNDESIRED -> ENABLED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
	 * hot-plug, headless s3, dpms
	 *
	 * Handles:	DESIRED -> DESIRED (Special case)
	 */
	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/*
	 * Handles:	UNDESIRED -> UNDESIRED
	 *		DESIRED -> DESIRED
	 *		ENABLED -> ENABLED
	 */
	if (old_state->content_protection == state->content_protection)
		return false;

	/*
	 * Handles:	UNDESIRED -> DESIRED
	 *		DESIRED -> UNDESIRED
	 *		ENABLED -> UNDESIRED
	 */
	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
		return true;

	/*
	 * Handles:	DESIRED -> ENABLED
	 * (only the HDCP module may move a connector to ENABLED, so a
	 * userspace-requested ENABLED needs no new work here)
	 */
	return false;
}
7428
7429 #endif
/*
 * Clear the CRTC-side bookkeeping for a stream being removed in the
 * update-mode path: forget the OTG instance and mark the CRTC disabled.
 *
 * NOTE(review): @adev and @stream are unused here; presumably kept so the
 * signature mirrors the stream add path — confirm before removing.
 */
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}
7439
/*
 * Compute the DC cursor position for @plane on @crtc.
 *
 * Fills @position from the plane state's crtc_x/crtc_y.  When the cursor
 * overhangs the top/left screen edge, the on-screen origin is clamped to
 * 0 and the overhang is expressed via the hotspot offsets instead.  If
 * the cursor is entirely off-screen, or there is no CRTC / framebuffer,
 * @position is left disabled and 0 is returned.
 *
 * Returns 0 on success, -EINVAL if the cursor exceeds the hardware's
 * maximum cursor dimensions.
 */
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	position->enable = false;
	position->x = 0;
	position->y = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	/* Fully off-screen to the top/left: keep the cursor disabled. */
	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	/* Partially off-screen: clamp to 0, carry the overhang in the hotspot. */
	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}
7487
/*
 * Program the hardware cursor for @plane.
 *
 * Derives the cursor position via get_cursor_position() and, when the
 * cursor is visible, pushes both its attributes (address, size, format)
 * and its position to DC under dm.dc_lock.  A disabled or fully
 * off-screen cursor is turned off by sending the zeroed position only.
 */
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	/* On disable, plane->state->crtc may be NULL; use the old CRTC then. */
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	/* Nothing to do if the cursor was and remains unset. */
	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	/* afb is non-NULL here: a NULL fb yields position.enable == false above. */
	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
							 &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}
7551
/*
 * Hand the pending pageflip event over to the flip interrupt handler.
 *
 * Moves the event from the CRTC's atomic state onto @acrtc and marks the
 * flip as submitted.  Caller must hold dev->event_lock (asserted below).
 */
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
						 acrtc->crtc_id);
}
7569
/*
 * Recompute the VRR (freesync) state for @new_stream after a flip.
 *
 * Runs the freesync module's pre-flip handling, rebuilds the VRR
 * infopacket, and records whether timing or infopacket changed so the
 * commit path knows to push updates.  All shared VRR state on the CRTC's
 * IRQ params is accessed under the DRM event_lock, making this safe
 * against the vblank/flip interrupt handlers.
 */
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		/* Pre-AI (pre-Vega) hardware needs BTR handled in software. */
		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket);

	/* Flag changes so the commit path knows to reprogram timing/packets. */
	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	acrtc->dm_irq_params.vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
7649
/*
 * update_stream_irq_parameters() - refresh per-CRTC VRR state read by the
 * DM interrupt handlers.
 * @dm: display manager owning the freesync module and DC instance
 * @new_crtc_state: committed DM CRTC state to derive the VRR config from
 *
 * Rebuilds the freesync/VRR parameters for the CRTC's stream and copies
 * them, together with the freesync config and active plane count, into
 * acrtc->dm_irq_params. That structure is shared with the vblank/vupdate
 * IRQ handlers, so all accesses are serialized under the device's
 * event_lock.
 */
static void update_stream_irq_parameters(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	/* Nothing to update for a CRTC without an attached stream. */
	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	/* dm_irq_params is shared with interrupt context; take event_lock. */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		/* VRR capable display: honor the userspace enable property. */
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	/* Record whether the vmin/vmax adjustment changed in this commit. */
	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_config = config;
	/* Copy state for access from DM IRQ handler */
	acrtc->dm_irq_params.freesync_config = config;
	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
	acrtc->dm_irq_params.vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
7699
7700 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7701                                             struct dm_crtc_state *new_state)
7702 {
7703         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7704         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7705
7706         if (!old_vrr_active && new_vrr_active) {
7707                 /* Transition VRR inactive -> active:
7708                  * While VRR is active, we must not disable vblank irq, as a
7709                  * reenable after disable would compute bogus vblank/pflip
7710                  * timestamps if it likely happened inside display front-porch.
7711                  *
7712                  * We also need vupdate irq for the actual core vblank handling
7713                  * at end of vblank.
7714                  */
7715                 dm_set_vupdate_irq(new_state->base.crtc, true);
7716                 drm_crtc_vblank_get(new_state->base.crtc);
7717                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7718                                  __func__, new_state->base.crtc->base.id);
7719         } else if (old_vrr_active && !new_vrr_active) {
7720                 /* Transition VRR active -> inactive:
7721                  * Allow vblank irq disable again for fixed refresh rate.
7722                  */
7723                 dm_set_vupdate_irq(new_state->base.crtc, false);
7724                 drm_crtc_vblank_put(new_state->base.crtc);
7725                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7726                                  __func__, new_state->base.crtc->base.id);
7727         }
7728 }
7729
7730 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7731 {
7732         struct drm_plane *plane;
7733         struct drm_plane_state *old_plane_state, *new_plane_state;
7734         int i;
7735
7736         /*
7737          * TODO: Make this per-stream so we don't issue redundant updates for
7738          * commits with multiple streams.
7739          */
7740         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7741                                        new_plane_state, i)
7742                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7743                         handle_cursor_update(plane, old_plane_state);
7744 }
7745
/*
 * amdgpu_dm_commit_planes() - program all updated planes on one CRTC.
 * @state: overall atomic state being committed
 * @dc_state: DC global state the stream update is applied against
 * @dev: DRM device
 * @dm: display manager for this adapter
 * @pcrtc: the CRTC whose planes are being committed
 * @wait_for_vblank: when true, throttle the flip one vblank further out
 *
 * Builds a dc_surface_update bundle for every non-cursor plane attached
 * to @pcrtc, waits (bounded) for fences on flipped buffers, applies VRR
 * flip throttling, then hands the whole bundle to DC in a single
 * dc_commit_updates_for_stream() call. Cursor updates, pflip IRQ state
 * and PSR enable/disable are sequenced around the stream commit.
 */
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	uint32_t i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	/*
	 * Per-commit scratch for the DC update call; heap-allocated because
	 * MAX_SURFACES copies of these structs are too large for the stack.
	 */
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* Skip planes not attached to the CRTC being committed. */
		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			/* Carry the per-plane color pipeline state into the update. */
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		/* A flip is only needed when both the old and new FB exist. */
		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
							false,
							msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state,
			afb->tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			afb->tmz_surface, false);

		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
					acrtc_attach->crtc_id);
			continue;
		}

		/* Freesync state is tracked on the primary plane's stream. */
		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;

	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		}
		else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}

		/**
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (eg. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
		if (acrtc_attach->base.state->event &&
		    acrtc_state->active_planes > 0) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			/* Refresh the VRR infopacket if it changed this commit. */
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
		acrtc_state->stream) {
		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
		    amdgpu_dm_vrr_active(acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_attach->dm_irq_params.vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		/* PSR must be off before a non-fast (full) update is committed. */
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
						     bundle->surface_updates,
						     planes_count,
						     acrtc_state->stream,
						     &bundle->stream_update,
						     dc_state);

		/**
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * When power gating is enabled on a pipe we lose the
		 * interrupt enablement state when power gating is disabled.
		 *
		 * So we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could be previously
		 * power gated) or off (since some pipes can't be power gated
		 * on some ASICs).
		 */
		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
			dm_update_pflip_irq_state(drm_to_adev(dev),
						  acrtc_attach);

		/*
		 * PSR link setup happens once after a full update on capable
		 * links; PSR itself is (re-)enabled only on fast updates.
		 */
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}
8061
/*
 * amdgpu_dm_commit_audio() - notify the audio driver about ELD changes.
 * @dev: DRM device
 * @state: the atomic state being committed
 *
 * Runs in two passes under a commit: first it notifies removals for
 * connectors whose CRTC binding changed or which are undergoing a
 * modeset (clearing the cached audio instance), then it notifies
 * additions for connectors whose new stream reports a valid audio
 * instance. aconnector->audio_inst is protected by dm.audio_lock.
 */
static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		/* Only modesets invalidate the existing audio instance. */
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		/* The stream status carries the hardware audio instance. */
		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}
8136
8137 /*
8138  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8139  * @crtc_state: the DRM CRTC state
8140  * @stream_state: the DC stream state.
8141  *
8142  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8143  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8144  */
8145 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8146                                                 struct dc_stream_state *stream_state)
8147 {
8148         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8149 }
8150
8151 /**
8152  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8153  * @state: The atomic state to commit
8154  *
8155  * This will tell DC to commit the constructed DC state from atomic_check,
8156  * programming the hardware. Any failures here implies a hardware failure, since
8157  * atomic check should have filtered anything non-kosher.
8158  */
8159 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8160 {
8161         struct drm_device *dev = state->dev;
8162         struct amdgpu_device *adev = drm_to_adev(dev);
8163         struct amdgpu_display_manager *dm = &adev->dm;
8164         struct dm_atomic_state *dm_state;
8165         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8166         uint32_t i, j;
8167         struct drm_crtc *crtc;
8168         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8169         unsigned long flags;
8170         bool wait_for_vblank = true;
8171         struct drm_connector *connector;
8172         struct drm_connector_state *old_con_state, *new_con_state;
8173         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8174         int crtc_disable_count = 0;
8175         bool mode_set_reset_required = false;
8176
8177         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8178
8179         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8180
8181         dm_state = dm_atomic_get_new_state(state);
8182         if (dm_state && dm_state->context) {
8183                 dc_state = dm_state->context;
8184         } else {
8185                 /* No state changes, retain current state. */
8186                 dc_state_temp = dc_create_state(dm->dc);
8187                 ASSERT(dc_state_temp);
8188                 dc_state = dc_state_temp;
8189                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8190         }
8191
8192         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8193                                        new_crtc_state, i) {
8194                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8195
8196                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8197
8198                 if (old_crtc_state->active &&
8199                     (!new_crtc_state->active ||
8200                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8201                         manage_dm_interrupts(adev, acrtc, false);
8202                         dc_stream_release(dm_old_crtc_state->stream);
8203                 }
8204         }
8205
8206         drm_atomic_helper_calc_timestamping_constants(state);
8207
8208         /* update changed items */
8209         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8210                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8211
8212                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8213                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8214
8215                 DRM_DEBUG_DRIVER(
8216                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8217                         "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8218                         "connectors_changed:%d\n",
8219                         acrtc->crtc_id,
8220                         new_crtc_state->enable,
8221                         new_crtc_state->active,
8222                         new_crtc_state->planes_changed,
8223                         new_crtc_state->mode_changed,
8224                         new_crtc_state->active_changed,
8225                         new_crtc_state->connectors_changed);
8226
8227                 /* Disable cursor if disabling crtc */
8228                 if (old_crtc_state->active && !new_crtc_state->active) {
8229                         struct dc_cursor_position position;
8230
8231                         memset(&position, 0, sizeof(position));
8232                         mutex_lock(&dm->dc_lock);
8233                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8234                         mutex_unlock(&dm->dc_lock);
8235                 }
8236
8237                 /* Copy all transient state flags into dc state */
8238                 if (dm_new_crtc_state->stream) {
8239                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8240                                                             dm_new_crtc_state->stream);
8241                 }
8242
8243                 /* handles headless hotplug case, updating new_state and
8244                  * aconnector as needed
8245                  */
8246
8247                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8248
8249                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8250
8251                         if (!dm_new_crtc_state->stream) {
8252                                 /*
8253                                  * this could happen because of issues with
8254                                  * userspace notifications delivery.
8255                                  * In this case userspace tries to set mode on
8256                                  * display which is disconnected in fact.
8257                                  * dc_sink is NULL in this case on aconnector.
8258                                  * We expect reset mode will come soon.
8259                                  *
8260                                  * This can also happen when unplug is done
8261                                  * during resume sequence ended
8262                                  *
8263                                  * In this case, we want to pretend we still
8264                                  * have a sink to keep the pipe running so that
8265                                  * hw state is consistent with the sw state
8266                                  */
8267                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8268                                                 __func__, acrtc->base.base.id);
8269                                 continue;
8270                         }
8271
8272                         if (dm_old_crtc_state->stream)
8273                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8274
8275                         pm_runtime_get_noresume(dev->dev);
8276
8277                         acrtc->enabled = true;
8278                         acrtc->hw_mode = new_crtc_state->mode;
8279                         crtc->hwmode = new_crtc_state->mode;
8280                         mode_set_reset_required = true;
8281                 } else if (modereset_required(new_crtc_state)) {
8282                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8283                         /* i.e. reset mode */
8284                         if (dm_old_crtc_state->stream)
8285                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8286                         mode_set_reset_required = true;
8287                 }
8288         } /* for_each_crtc_in_state() */
8289
8290         if (dc_state) {
8291                 /* if there mode set or reset, disable eDP PSR */
8292                 if (mode_set_reset_required)
8293                         amdgpu_dm_psr_disable_all(dm);
8294
8295                 dm_enable_per_frame_crtc_master_sync(dc_state);
8296                 mutex_lock(&dm->dc_lock);
8297                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8298                 mutex_unlock(&dm->dc_lock);
8299         }
8300
8301         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8302                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8303
8304                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8305
8306                 if (dm_new_crtc_state->stream != NULL) {
8307                         const struct dc_stream_status *status =
8308                                         dc_stream_get_status(dm_new_crtc_state->stream);
8309
8310                         if (!status)
8311                                 status = dc_stream_get_status_from_state(dc_state,
8312                                                                          dm_new_crtc_state->stream);
8313                         if (!status)
8314                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8315                         else
8316                                 acrtc->otg_inst = status->primary_otg_inst;
8317                 }
8318         }
8319 #ifdef CONFIG_DRM_AMD_DC_HDCP
8320         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8321                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8322                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8323                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8324
8325                 new_crtc_state = NULL;
8326
8327                 if (acrtc)
8328                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8329
8330                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8331
8332                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8333                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8334                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8335                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8336                         dm_new_con_state->update_hdcp = true;
8337                         continue;
8338                 }
8339
8340                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8341                         hdcp_update_display(
8342                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8343                                 new_con_state->hdcp_content_type,
8344                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8345                                                                                                          : false);
8346         }
8347 #endif
8348
8349         /* Handle connector state changes */
8350         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8351                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8352                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8353                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8354                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8355                 struct dc_stream_update stream_update;
8356                 struct dc_info_packet hdr_packet;
8357                 struct dc_stream_status *status = NULL;
8358                 bool abm_changed, hdr_changed, scaling_changed;
8359
8360                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8361                 memset(&stream_update, 0, sizeof(stream_update));
8362
8363                 if (acrtc) {
8364                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8365                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8366                 }
8367
8368                 /* Skip any modesets/resets */
8369                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8370                         continue;
8371
8372                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8373                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8374
8375                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8376                                                              dm_old_con_state);
8377
8378                 abm_changed = dm_new_crtc_state->abm_level !=
8379                               dm_old_crtc_state->abm_level;
8380
8381                 hdr_changed =
8382                         is_hdr_metadata_different(old_con_state, new_con_state);
8383
8384                 if (!scaling_changed && !abm_changed && !hdr_changed)
8385                         continue;
8386
8387                 stream_update.stream = dm_new_crtc_state->stream;
8388                 if (scaling_changed) {
8389                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8390                                         dm_new_con_state, dm_new_crtc_state->stream);
8391
8392                         stream_update.src = dm_new_crtc_state->stream->src;
8393                         stream_update.dst = dm_new_crtc_state->stream->dst;
8394                 }
8395
8396                 if (abm_changed) {
8397                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8398
8399                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8400                 }
8401
8402                 if (hdr_changed) {
8403                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8404                         stream_update.hdr_static_metadata = &hdr_packet;
8405                 }
8406
8407                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8408                 WARN_ON(!status);
8409                 WARN_ON(!status->plane_count);
8410
8411                 /*
8412                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8413                  * Here we create an empty update on each plane.
8414                  * To fix this, DC should permit updating only stream properties.
8415                  */
8416                 for (j = 0; j < status->plane_count; j++)
8417                         dummy_updates[j].surface = status->plane_states[0];
8418
8419
8420                 mutex_lock(&dm->dc_lock);
8421                 dc_commit_updates_for_stream(dm->dc,
8422                                                      dummy_updates,
8423                                                      status->plane_count,
8424                                                      dm_new_crtc_state->stream,
8425                                                      &stream_update,
8426                                                      dc_state);
8427                 mutex_unlock(&dm->dc_lock);
8428         }
8429
8430         /* Count number of newly disabled CRTCs for dropping PM refs later. */
8431         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8432                                       new_crtc_state, i) {
8433                 if (old_crtc_state->active && !new_crtc_state->active)
8434                         crtc_disable_count++;
8435
8436                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8437                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8438
8439                 /* For freesync config update on crtc state and params for irq */
8440                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8441
8442                 /* Handle vrr on->off / off->on transitions */
8443                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8444                                                 dm_new_crtc_state);
8445         }
8446
8447         /**
8448          * Enable interrupts for CRTCs that are newly enabled or went through
8449          * a modeset. It was intentionally deferred until after the front end
8450          * state was modified to wait until the OTG was on and so the IRQ
8451          * handlers didn't access stale or invalid state.
8452          */
8453         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8454                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8455                 bool configure_crc = false;
8456
8457                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8458
8459                 if (new_crtc_state->active &&
8460                     (!old_crtc_state->active ||
8461                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8462                         dc_stream_retain(dm_new_crtc_state->stream);
8463                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8464                         manage_dm_interrupts(adev, acrtc, true);
8465                 }
8466                 if (IS_ENABLED(CONFIG_DEBUG_FS) && new_crtc_state->active &&
8467                         amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8468                         /**
8469                          * Frontend may have changed so reapply the CRC capture
8470                          * settings for the stream.
8471                          */
8472                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8473                         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8474
8475                         if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
8476                                 if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
8477                                         configure_crc = true;
8478                         } else {
8479                                 if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
8480                                         configure_crc = true;
8481                         }
8482
8483                         if (configure_crc)
8484                                 amdgpu_dm_crtc_configure_crc_source(
8485                                         crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
8486                 }
8487         }
8488
8489         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8490                 if (new_crtc_state->async_flip)
8491                         wait_for_vblank = false;
8492
8493         /* update planes when needed per crtc*/
8494         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8495                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8496
8497                 if (dm_new_crtc_state->stream)
8498                         amdgpu_dm_commit_planes(state, dc_state, dev,
8499                                                 dm, crtc, wait_for_vblank);
8500         }
8501
8502         /* Update audio instances for each connector. */
8503         amdgpu_dm_commit_audio(dev, state);
8504
8505         /*
8506          * send vblank event on all events not handled in flip and
8507          * mark consumed event for drm_atomic_helper_commit_hw_done
8508          */
8509         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8510         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8511
8512                 if (new_crtc_state->event)
8513                         drm_send_event_locked(dev, &new_crtc_state->event->base);
8514
8515                 new_crtc_state->event = NULL;
8516         }
8517         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8518
8519         /* Signal HW programming completion */
8520         drm_atomic_helper_commit_hw_done(state);
8521
8522         if (wait_for_vblank)
8523                 drm_atomic_helper_wait_for_flip_done(dev, state);
8524
8525         drm_atomic_helper_cleanup_planes(dev, state);
8526
8527         /* return the stolen vga memory back to VRAM */
8528         if (!adev->mman.keep_stolen_vga_memory)
8529                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8530         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8531
8532         /*
8533          * Finally, drop a runtime PM reference for each newly disabled CRTC,
8534          * so we can put the GPU into runtime suspend if we're not driving any
8535          * displays anymore
8536          */
8537         for (i = 0; i < crtc_disable_count; i++)
8538                 pm_runtime_put_autosuspend(dev->dev);
8539         pm_runtime_mark_last_busy(dev->dev);
8540
8541         if (dc_state_temp)
8542                 dc_release_state(dc_state_temp);
8543 }
8544
8545
8546 static int dm_force_atomic_commit(struct drm_connector *connector)
8547 {
8548         int ret = 0;
8549         struct drm_device *ddev = connector->dev;
8550         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8551         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8552         struct drm_plane *plane = disconnected_acrtc->base.primary;
8553         struct drm_connector_state *conn_state;
8554         struct drm_crtc_state *crtc_state;
8555         struct drm_plane_state *plane_state;
8556
8557         if (!state)
8558                 return -ENOMEM;
8559
8560         state->acquire_ctx = ddev->mode_config.acquire_ctx;
8561
8562         /* Construct an atomic state to restore previous display setting */
8563
8564         /*
8565          * Attach connectors to drm_atomic_state
8566          */
8567         conn_state = drm_atomic_get_connector_state(state, connector);
8568
8569         ret = PTR_ERR_OR_ZERO(conn_state);
8570         if (ret)
8571                 goto err;
8572
8573         /* Attach crtc to drm_atomic_state*/
8574         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8575
8576         ret = PTR_ERR_OR_ZERO(crtc_state);
8577         if (ret)
8578                 goto err;
8579
8580         /* force a restore */
8581         crtc_state->mode_changed = true;
8582
8583         /* Attach plane to drm_atomic_state */
8584         plane_state = drm_atomic_get_plane_state(state, plane);
8585
8586         ret = PTR_ERR_OR_ZERO(plane_state);
8587         if (ret)
8588                 goto err;
8589
8590
8591         /* Call commit internally with the state we just constructed */
8592         ret = drm_atomic_commit(state);
8593         if (!ret)
8594                 return 0;
8595
8596 err:
8597         DRM_ERROR("Restoring old state failed with %i\n", ret);
8598         drm_atomic_state_put(state);
8599
8600         return ret;
8601 }
8602
/*
 * This function handles all cases when set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support
 */
8608 void dm_restore_drm_connector_state(struct drm_device *dev,
8609                                     struct drm_connector *connector)
8610 {
8611         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8612         struct amdgpu_crtc *disconnected_acrtc;
8613         struct dm_crtc_state *acrtc_state;
8614
8615         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8616                 return;
8617
8618         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8619         if (!disconnected_acrtc)
8620                 return;
8621
8622         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8623         if (!acrtc_state->stream)
8624                 return;
8625
8626         /*
8627          * If the previous sink is not released and different from the current,
8628          * we deduce we are in a state where we can not rely on usermode call
8629          * to turn on the display, so we do it here
8630          */
8631         if (acrtc_state->stream->sink != aconnector->dc_sink)
8632                 dm_force_atomic_commit(&aconnector->base);
8633 }
8634
/*
 * Grabs all modesetting locks to serialize against any blocking commits.
 * Waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases it the
	 * extra locks we are locking here will get released too
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/*
		 * Grab a reference on the newest commit under the
		 * commit_lock spinlock, so it cannot be freed while we
		 * wait on its completions below.
		 */
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
				struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		/* ret > 0: hw_done signaled, now also wait for the flip. */
		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		/* ret == 0 means the 10 second timeout expired. */
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
				  "timed out\n", crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	/* Negative ret means an interrupted wait; timeouts report success. */
	return ret < 0 ? ret : 0;
}
8685
8686 static void get_freesync_config_for_crtc(
8687         struct dm_crtc_state *new_crtc_state,
8688         struct dm_connector_state *new_con_state)
8689 {
8690         struct mod_freesync_config config = {0};
8691         struct amdgpu_dm_connector *aconnector =
8692                         to_amdgpu_dm_connector(new_con_state->base.connector);
8693         struct drm_display_mode *mode = &new_crtc_state->base.mode;
8694         int vrefresh = drm_mode_vrefresh(mode);
8695
8696         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8697                                         vrefresh >= aconnector->min_vfreq &&
8698                                         vrefresh <= aconnector->max_vfreq;
8699
8700         if (new_crtc_state->vrr_supported) {
8701                 new_crtc_state->stream->ignore_msa_timing_param = true;
8702                 config.state = new_crtc_state->base.vrr_enabled ?
8703                                 VRR_STATE_ACTIVE_VARIABLE :
8704                                 VRR_STATE_INACTIVE;
8705                 config.min_refresh_in_uhz =
8706                                 aconnector->min_vfreq * 1000000;
8707                 config.max_refresh_in_uhz =
8708                                 aconnector->max_vfreq * 1000000;
8709                 config.vsif_supported = true;
8710                 config.btr = true;
8711         }
8712
8713         new_crtc_state->freesync_config = config;
8714 }
8715
8716 static void reset_freesync_config_for_crtc(
8717         struct dm_crtc_state *new_crtc_state)
8718 {
8719         new_crtc_state->vrr_supported = false;
8720
8721         memset(&new_crtc_state->vrr_infopacket, 0,
8722                sizeof(new_crtc_state->vrr_infopacket));
8723 }
8724
/*
 * Per-CRTC portion of the atomic check. For an enabling CRTC this
 * creates and validates the new dc_stream and adds it to the dc
 * context; for a disabling CRTC it removes the old stream. It also
 * refreshes stream-level properties (scaling, ABM, color management,
 * freesync) that do not require a full modeset.
 *
 * Sets *lock_and_validation_needed when the global dc context was
 * modified, so the caller knows a full dc validation pass is needed.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
							    &aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
							    &aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		/* Stream creation/validation is only needed on a modeset. */
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		/* Returns a new reference that skip_modeset/fail must release. */
		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * we can have no stream on ACTION_SET if a display
		 * was disconnected during S3, in this case it is not an
		 * error, the OS will be updated after detection, and
		 * will do the right thing on next atomic commit
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					__func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		/*
		 * TODO: Check VSDB bits to decide whether this should
		 * be enabled or not.
		 */
		new_stream->triggered_crtc_reset.enabled =
			dm->force_timing_sync;

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		/* Drop the CRTC state's reference on the outgoing stream. */
		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		/* dc context changed: full validation is required later. */
		*lock_and_validation_needed = true;

	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent NULL pointer on new_stream when
		 * added MST connectors not found in existing crtc_state in the chained mode
		 * TODO: need to dig out the root cause of that
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			/* Extra reference for the CRTC state; the one from
			 * create_validate_stream_for_sink() is dropped at
			 * skip_modeset below.
			 */
			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
						crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		 dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->active))
		return 0;
	/*
	 * Given above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}
8960
8961 static bool should_reset_plane(struct drm_atomic_state *state,
8962                                struct drm_plane *plane,
8963                                struct drm_plane_state *old_plane_state,
8964                                struct drm_plane_state *new_plane_state)
8965 {
8966         struct drm_plane *other;
8967         struct drm_plane_state *old_other_state, *new_other_state;
8968         struct drm_crtc_state *new_crtc_state;
8969         int i;
8970
8971         /*
8972          * TODO: Remove this hack once the checks below are sufficient
8973          * enough to determine when we need to reset all the planes on
8974          * the stream.
8975          */
8976         if (state->allow_modeset)
8977                 return true;
8978
8979         /* Exit early if we know that we're adding or removing the plane. */
8980         if (old_plane_state->crtc != new_plane_state->crtc)
8981                 return true;
8982
8983         /* old crtc == new_crtc == NULL, plane not in context. */
8984         if (!new_plane_state->crtc)
8985                 return false;
8986
8987         new_crtc_state =
8988                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8989
8990         if (!new_crtc_state)
8991                 return true;
8992
8993         /* CRTC Degamma changes currently require us to recreate planes. */
8994         if (new_crtc_state->color_mgmt_changed)
8995                 return true;
8996
8997         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8998                 return true;
8999
9000         /*
9001          * If there are any new primary or overlay planes being added or
9002          * removed then the z-order can potentially change. To ensure
9003          * correct z-order and pipe acquisition the current DC architecture
9004          * requires us to remove and recreate all existing planes.
9005          *
9006          * TODO: Come up with a more elegant solution for this.
9007          */
9008         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9009                 struct amdgpu_framebuffer *old_afb, *new_afb;
9010                 if (other->type == DRM_PLANE_TYPE_CURSOR)
9011                         continue;
9012
9013                 if (old_other_state->crtc != new_plane_state->crtc &&
9014                     new_other_state->crtc != new_plane_state->crtc)
9015                         continue;
9016
9017                 if (old_other_state->crtc != new_other_state->crtc)
9018                         return true;
9019
9020                 /* Src/dst size and scaling updates. */
9021                 if (old_other_state->src_w != new_other_state->src_w ||
9022                     old_other_state->src_h != new_other_state->src_h ||
9023                     old_other_state->crtc_w != new_other_state->crtc_w ||
9024                     old_other_state->crtc_h != new_other_state->crtc_h)
9025                         return true;
9026
9027                 /* Rotation / mirroring updates. */
9028                 if (old_other_state->rotation != new_other_state->rotation)
9029                         return true;
9030
9031                 /* Blending updates. */
9032                 if (old_other_state->pixel_blend_mode !=
9033                     new_other_state->pixel_blend_mode)
9034                         return true;
9035
9036                 /* Alpha updates. */
9037                 if (old_other_state->alpha != new_other_state->alpha)
9038                         return true;
9039
9040                 /* Colorspace changes. */
9041                 if (old_other_state->color_range != new_other_state->color_range ||
9042                     old_other_state->color_encoding != new_other_state->color_encoding)
9043                         return true;
9044
9045                 /* Framebuffer checks fall at the end. */
9046                 if (!old_other_state->fb || !new_other_state->fb)
9047                         continue;
9048
9049                 /* Pixel format changes can require bandwidth updates. */
9050                 if (old_other_state->fb->format != new_other_state->fb->format)
9051                         return true;
9052
9053                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9054                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9055
9056                 /* Tiling and DCC changes also require bandwidth updates. */
9057                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9058                     old_afb->base.modifier != new_afb->base.modifier)
9059                         return true;
9060         }
9061
9062         return false;
9063 }
9064
9065 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9066                               struct drm_plane_state *new_plane_state,
9067                               struct drm_framebuffer *fb)
9068 {
9069         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9070         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9071         unsigned int pitch;
9072         bool linear;
9073
9074         if (fb->width > new_acrtc->max_cursor_width ||
9075             fb->height > new_acrtc->max_cursor_height) {
9076                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9077                                  new_plane_state->fb->width,
9078                                  new_plane_state->fb->height);
9079                 return -EINVAL;
9080         }
9081         if (new_plane_state->src_w != fb->width << 16 ||
9082             new_plane_state->src_h != fb->height << 16) {
9083                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9084                 return -EINVAL;
9085         }
9086
9087         /* Pitch in pixels */
9088         pitch = fb->pitches[0] / fb->format->cpp[0];
9089
9090         if (fb->width != pitch) {
9091                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9092                                  fb->width, pitch);
9093                 return -EINVAL;
9094         }
9095
9096         switch (pitch) {
9097         case 64:
9098         case 128:
9099         case 256:
9100                 /* FB pitch is supported by cursor plane */
9101                 break;
9102         default:
9103                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9104                 return -EINVAL;
9105         }
9106
9107         /* Core DRM takes care of checking FB modifiers, so we only need to
9108          * check tiling flags when the FB doesn't have a modifier. */
9109         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9110                 if (adev->family < AMDGPU_FAMILY_AI) {
9111                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9112                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9113                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9114                 } else {
9115                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9116                 }
9117                 if (!linear) {
9118                         DRM_DEBUG_ATOMIC("Cursor FB not linear");
9119                         return -EINVAL;
9120                 }
9121         }
9122
9123         return 0;
9124 }
9125
/*
 * Add or remove a single DRM plane's DC plane state in the DM atomic
 * state, as part of atomic check.
 *
 * Called twice per plane by amdgpu_dm_atomic_check(): once with
 * @enable == false to remove changed/removed planes from the DC context
 * and once with @enable == true to (re)add them. @lock_and_validation_needed
 * is set to true whenever the DC context was modified, forcing global
 * validation later.
 *
 * Returns 0 on success or a negative error code.
 */
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{

	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;


	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	/*
	 * Cursor planes have no DC plane state; only validate the FB and
	 * position constraints and return early.
	 */
	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
			drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		/* src_x/src_y are 16.16 fixed point; any offset = cropping. */
		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
			return -EINVAL;
		}

		if (new_plane_state->fb) {
			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
						 new_plane_state->fb);
			if (ret)
				return ret;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		/* Nothing to tear down if the plane doesn't need a reset. */
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {

			return -EINVAL;
		}


		/* Drop our reference; the DC context no longer holds it. */
		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		/* The disable pass above should have cleared any old state. */
		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			/* Not attached to the context yet: release it here. */
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {

			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/* Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}


	return ret;
}
9289
9290 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9291                                 struct drm_crtc *crtc,
9292                                 struct drm_crtc_state *new_crtc_state)
9293 {
9294         struct drm_plane_state *new_cursor_state, *new_primary_state;
9295         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9296
9297         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9298          * cursor per pipe but it's going to inherit the scaling and
9299          * positioning from the underlying pipe. Check the cursor plane's
9300          * blending properties match the primary plane's. */
9301
9302         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9303         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9304         if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9305                 return 0;
9306         }
9307
9308         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9309                          (new_cursor_state->src_w >> 16);
9310         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9311                          (new_cursor_state->src_h >> 16);
9312
9313         primary_scale_w = new_primary_state->crtc_w * 1000 /
9314                          (new_primary_state->src_w >> 16);
9315         primary_scale_h = new_primary_state->crtc_h * 1000 /
9316                          (new_primary_state->src_h >> 16);
9317
9318         if (cursor_scale_w != primary_scale_w ||
9319             cursor_scale_h != primary_scale_h) {
9320                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9321                 return -EINVAL;
9322         }
9323
9324         return 0;
9325 }
9326
9327 #if defined(CONFIG_DRM_AMD_DC_DCN)
9328 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9329 {
9330         struct drm_connector *connector;
9331         struct drm_connector_state *conn_state;
9332         struct amdgpu_dm_connector *aconnector = NULL;
9333         int i;
9334         for_each_new_connector_in_state(state, connector, conn_state, i) {
9335                 if (conn_state->crtc != crtc)
9336                         continue;
9337
9338                 aconnector = to_amdgpu_dm_connector(connector);
9339                 if (!aconnector->port || !aconnector->mst_port)
9340                         aconnector = NULL;
9341                 else
9342                         break;
9343         }
9344
9345         if (!aconnector)
9346                 return 0;
9347
9348         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9349 }
9350 #endif
9351
9352 /**
9353  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9354  * @dev: The DRM device
9355  * @state: The atomic state to commit
9356  *
9357  * Validate that the given atomic state is programmable by DC into hardware.
9358  * This involves constructing a &struct dc_state reflecting the new hardware
9359  * state we wish to commit, then querying DC to see if it is programmable. It's
9360  * important not to modify the existing DC state. Otherwise, atomic_check
9361  * may unexpectedly commit hardware changes.
9362  *
9363  * When validating the DC state, it's important that the right locks are
9364  * acquired. For full updates case which removes/adds/updates streams on one
9365  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9366  * that any such full update commit will wait for completion of any outstanding
9367  * flip using DRMs synchronization events.
9368  *
9369  * Note that DM adds the affected connectors for all CRTCs in state, when that
9370  * might not seem necessary. This is because DC stream creation requires the
9371  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9372  * be possible but non-trivial - a possible TODO item.
9373  *
 * Return: 0 on success, or a negative error code if validation fails.
9375  */
9376 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9377                                   struct drm_atomic_state *state)
9378 {
9379         struct amdgpu_device *adev = drm_to_adev(dev);
9380         struct dm_atomic_state *dm_state = NULL;
9381         struct dc *dc = adev->dm.dc;
9382         struct drm_connector *connector;
9383         struct drm_connector_state *old_con_state, *new_con_state;
9384         struct drm_crtc *crtc;
9385         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9386         struct drm_plane *plane;
9387         struct drm_plane_state *old_plane_state, *new_plane_state;
9388         enum dc_status status;
9389         int ret, i;
9390         bool lock_and_validation_needed = false;
9391         struct dm_crtc_state *dm_old_crtc_state;
9392
9393         trace_amdgpu_dm_atomic_check_begin(state);
9394
9395         ret = drm_atomic_helper_check_modeset(dev, state);
9396         if (ret)
9397                 goto fail;
9398
9399         /* Check connector changes */
9400         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9401                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9402                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9403
9404                 /* Skip connectors that are disabled or part of modeset already. */
9405                 if (!old_con_state->crtc && !new_con_state->crtc)
9406                         continue;
9407
9408                 if (!new_con_state->crtc)
9409                         continue;
9410
9411                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9412                 if (IS_ERR(new_crtc_state)) {
9413                         ret = PTR_ERR(new_crtc_state);
9414                         goto fail;
9415                 }
9416
9417                 if (dm_old_con_state->abm_level !=
9418                     dm_new_con_state->abm_level)
9419                         new_crtc_state->connectors_changed = true;
9420         }
9421
9422 #if defined(CONFIG_DRM_AMD_DC_DCN)
9423         if (adev->asic_type >= CHIP_NAVI10) {
9424                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9425                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9426                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
9427                                 if (ret)
9428                                         goto fail;
9429                         }
9430                 }
9431         }
9432 #endif
9433         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9434                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9435
9436                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9437                     !new_crtc_state->color_mgmt_changed &&
9438                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9439                         dm_old_crtc_state->dsc_force_changed == false)
9440                         continue;
9441
9442                 if (!new_crtc_state->enable)
9443                         continue;
9444
9445                 ret = drm_atomic_add_affected_connectors(state, crtc);
9446                 if (ret)
9447                         return ret;
9448
9449                 ret = drm_atomic_add_affected_planes(state, crtc);
9450                 if (ret)
9451                         goto fail;
9452
9453                 if (dm_old_crtc_state->dsc_force_changed)
9454                         new_crtc_state->mode_changed = true;
9455         }
9456
9457         /*
9458          * Add all primary and overlay planes on the CRTC to the state
9459          * whenever a plane is enabled to maintain correct z-ordering
9460          * and to enable fast surface updates.
9461          */
9462         drm_for_each_crtc(crtc, dev) {
9463                 bool modified = false;
9464
9465                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9466                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9467                                 continue;
9468
9469                         if (new_plane_state->crtc == crtc ||
9470                             old_plane_state->crtc == crtc) {
9471                                 modified = true;
9472                                 break;
9473                         }
9474                 }
9475
9476                 if (!modified)
9477                         continue;
9478
9479                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9480                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9481                                 continue;
9482
9483                         new_plane_state =
9484                                 drm_atomic_get_plane_state(state, plane);
9485
9486                         if (IS_ERR(new_plane_state)) {
9487                                 ret = PTR_ERR(new_plane_state);
9488                                 goto fail;
9489                         }
9490                 }
9491         }
9492
9493         /* Remove exiting planes if they are modified */
9494         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9495                 ret = dm_update_plane_state(dc, state, plane,
9496                                             old_plane_state,
9497                                             new_plane_state,
9498                                             false,
9499                                             &lock_and_validation_needed);
9500                 if (ret)
9501                         goto fail;
9502         }
9503
9504         /* Disable all crtcs which require disable */
9505         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9506                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9507                                            old_crtc_state,
9508                                            new_crtc_state,
9509                                            false,
9510                                            &lock_and_validation_needed);
9511                 if (ret)
9512                         goto fail;
9513         }
9514
9515         /* Enable all crtcs which require enable */
9516         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9517                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9518                                            old_crtc_state,
9519                                            new_crtc_state,
9520                                            true,
9521                                            &lock_and_validation_needed);
9522                 if (ret)
9523                         goto fail;
9524         }
9525
9526         /* Add new/modified planes */
9527         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9528                 ret = dm_update_plane_state(dc, state, plane,
9529                                             old_plane_state,
9530                                             new_plane_state,
9531                                             true,
9532                                             &lock_and_validation_needed);
9533                 if (ret)
9534                         goto fail;
9535         }
9536
9537         /* Run this here since we want to validate the streams we created */
9538         ret = drm_atomic_helper_check_planes(dev, state);
9539         if (ret)
9540                 goto fail;
9541
9542         /* Check cursor planes scaling */
9543         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9544                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9545                 if (ret)
9546                         goto fail;
9547         }
9548
9549         if (state->legacy_cursor_update) {
9550                 /*
9551                  * This is a fast cursor update coming from the plane update
9552                  * helper, check if it can be done asynchronously for better
9553                  * performance.
9554                  */
9555                 state->async_update =
9556                         !drm_atomic_helper_async_check(dev, state);
9557
9558                 /*
9559                  * Skip the remaining global validation if this is an async
9560                  * update. Cursor updates can be done without affecting
9561                  * state or bandwidth calcs and this avoids the performance
9562                  * penalty of locking the private state object and
9563                  * allocating a new dc_state.
9564                  */
9565                 if (state->async_update)
9566                         return 0;
9567         }
9568
9569         /* Check scaling and underscan changes*/
9570         /* TODO Removed scaling changes validation due to inability to commit
9571          * new stream into context w\o causing full reset. Need to
9572          * decide how to handle.
9573          */
9574         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9575                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9576                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9577                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9578
9579                 /* Skip any modesets/resets */
9580                 if (!acrtc || drm_atomic_crtc_needs_modeset(
9581                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9582                         continue;
9583
9584                 /* Skip any thing not scale or underscan changes */
9585                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9586                         continue;
9587
9588                 lock_and_validation_needed = true;
9589         }
9590
9591         /**
9592          * Streams and planes are reset when there are changes that affect
9593          * bandwidth. Anything that affects bandwidth needs to go through
9594          * DC global validation to ensure that the configuration can be applied
9595          * to hardware.
9596          *
9597          * We have to currently stall out here in atomic_check for outstanding
9598          * commits to finish in this case because our IRQ handlers reference
9599          * DRM state directly - we can end up disabling interrupts too early
9600          * if we don't.
9601          *
9602          * TODO: Remove this stall and drop DM state private objects.
9603          */
9604         if (lock_and_validation_needed) {
9605                 ret = dm_atomic_get_state(state, &dm_state);
9606                 if (ret)
9607                         goto fail;
9608
9609                 ret = do_aquire_global_lock(dev, state);
9610                 if (ret)
9611                         goto fail;
9612
9613 #if defined(CONFIG_DRM_AMD_DC_DCN)
9614                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9615                         goto fail;
9616
9617                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9618                 if (ret)
9619                         goto fail;
9620 #endif
9621
9622                 /*
9623                  * Perform validation of MST topology in the state:
9624                  * We need to perform MST atomic check before calling
9625                  * dc_validate_global_state(), or there is a chance
9626                  * to get stuck in an infinite loop and hang eventually.
9627                  */
9628                 ret = drm_dp_mst_atomic_check(state);
9629                 if (ret)
9630                         goto fail;
9631                 status = dc_validate_global_state(dc, dm_state->context, false);
9632                 if (status != DC_OK) {
9633                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
9634                                        dc_status_to_str(status), status);
9635                         ret = -EINVAL;
9636                         goto fail;
9637                 }
9638         } else {
9639                 /*
9640                  * The commit is a fast update. Fast updates shouldn't change
9641                  * the DC context, affect global validation, and can have their
9642                  * commit work done in parallel with other commits not touching
9643                  * the same resource. If we have a new DC context as part of
9644                  * the DM atomic state from validation we need to free it and
9645                  * retain the existing one instead.
9646                  *
9647                  * Furthermore, since the DM atomic state only contains the DC
9648                  * context and can safely be annulled, we can free the state
9649                  * and clear the associated private object now to free
9650                  * some memory and avoid a possible use-after-free later.
9651                  */
9652
9653                 for (i = 0; i < state->num_private_objs; i++) {
9654                         struct drm_private_obj *obj = state->private_objs[i].ptr;
9655
9656                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
9657                                 int j = state->num_private_objs-1;
9658
9659                                 dm_atomic_destroy_state(obj,
9660                                                 state->private_objs[i].state);
9661
9662                                 /* If i is not at the end of the array then the
9663                                  * last element needs to be moved to where i was
9664                                  * before the array can safely be truncated.
9665                                  */
9666                                 if (i != j)
9667                                         state->private_objs[i] =
9668                                                 state->private_objs[j];
9669
9670                                 state->private_objs[j].ptr = NULL;
9671                                 state->private_objs[j].state = NULL;
9672                                 state->private_objs[j].old_state = NULL;
9673                                 state->private_objs[j].new_state = NULL;
9674
9675                                 state->num_private_objs = j;
9676                                 break;
9677                         }
9678                 }
9679         }
9680
9681         /* Store the overall update type for use later in atomic check. */
9682         for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9683                 struct dm_crtc_state *dm_new_crtc_state =
9684                         to_dm_crtc_state(new_crtc_state);
9685
9686                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9687                                                          UPDATE_TYPE_FULL :
9688                                                          UPDATE_TYPE_FAST;
9689         }
9690
9691         /* Must be success */
9692         WARN_ON(ret);
9693
9694         trace_amdgpu_dm_atomic_check_finish(state, ret);
9695
9696         return ret;
9697
9698 fail:
9699         if (ret == -EDEADLK)
9700                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9701         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9702                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9703         else
9704                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9705
9706         trace_amdgpu_dm_atomic_check_finish(state, ret);
9707
9708         return ret;
9709 }
9710
9711 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9712                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
9713 {
9714         uint8_t dpcd_data;
9715         bool capable = false;
9716
9717         if (amdgpu_dm_connector->dc_link &&
9718                 dm_helpers_dp_read_dpcd(
9719                                 NULL,
9720                                 amdgpu_dm_connector->dc_link,
9721                                 DP_DOWN_STREAM_PORT_COUNT,
9722                                 &dpcd_data,
9723                                 sizeof(dpcd_data))) {
9724                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
9725         }
9726
9727         return capable;
9728 }
9729 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9730                                         struct edid *edid)
9731 {
9732         int i;
9733         bool edid_check_required;
9734         struct detailed_timing *timing;
9735         struct detailed_non_pixel *data;
9736         struct detailed_data_monitor_range *range;
9737         struct amdgpu_dm_connector *amdgpu_dm_connector =
9738                         to_amdgpu_dm_connector(connector);
9739         struct dm_connector_state *dm_con_state = NULL;
9740
9741         struct drm_device *dev = connector->dev;
9742         struct amdgpu_device *adev = drm_to_adev(dev);
9743         bool freesync_capable = false;
9744
9745         if (!connector->state) {
9746                 DRM_ERROR("%s - Connector has no state", __func__);
9747                 goto update;
9748         }
9749
9750         if (!edid) {
9751                 dm_con_state = to_dm_connector_state(connector->state);
9752
9753                 amdgpu_dm_connector->min_vfreq = 0;
9754                 amdgpu_dm_connector->max_vfreq = 0;
9755                 amdgpu_dm_connector->pixel_clock_mhz = 0;
9756
9757                 goto update;
9758         }
9759
9760         dm_con_state = to_dm_connector_state(connector->state);
9761
9762         edid_check_required = false;
9763         if (!amdgpu_dm_connector->dc_sink) {
9764                 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9765                 goto update;
9766         }
9767         if (!adev->dm.freesync_module)
9768                 goto update;
9769         /*
9770          * if edid non zero restrict freesync only for dp and edp
9771          */
9772         if (edid) {
9773                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9774                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9775                         edid_check_required = is_dp_capable_without_timing_msa(
9776                                                 adev->dm.dc,
9777                                                 amdgpu_dm_connector);
9778                 }
9779         }
9780         if (edid_check_required == true && (edid->version > 1 ||
9781            (edid->version == 1 && edid->revision > 1))) {
9782                 for (i = 0; i < 4; i++) {
9783
9784                         timing  = &edid->detailed_timings[i];
9785                         data    = &timing->data.other_data;
9786                         range   = &data->data.range;
9787                         /*
9788                          * Check if monitor has continuous frequency mode
9789                          */
9790                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
9791                                 continue;
9792                         /*
9793                          * Check for flag range limits only. If flag == 1 then
9794                          * no additional timing information provided.
9795                          * Default GTF, GTF Secondary curve and CVT are not
9796                          * supported
9797                          */
9798                         if (range->flags != 1)
9799                                 continue;
9800
9801                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9802                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9803                         amdgpu_dm_connector->pixel_clock_mhz =
9804                                 range->pixel_clock_mhz * 10;
9805                         break;
9806                 }
9807
9808                 if (amdgpu_dm_connector->max_vfreq -
9809                     amdgpu_dm_connector->min_vfreq > 10) {
9810
9811                         freesync_capable = true;
9812                 }
9813         }
9814
9815 update:
9816         if (dm_con_state)
9817                 dm_con_state->freesync_capable = freesync_capable;
9818
9819         if (connector->vrr_capable_property)
9820                 drm_connector_set_vrr_capable_property(connector,
9821                                                        freesync_capable);
9822 }
9823
9824 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9825 {
9826         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9827
9828         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9829                 return;
9830         if (link->type == dc_connection_none)
9831                 return;
9832         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9833                                         dpcd_data, sizeof(dpcd_data))) {
9834                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9835
9836                 if (dpcd_data[0] == 0) {
9837                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9838                         link->psr_settings.psr_feature_enabled = false;
9839                 } else {
9840                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
9841                         link->psr_settings.psr_feature_enabled = true;
9842                 }
9843
9844                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9845         }
9846 }
9847
9848 /*
9849  * amdgpu_dm_link_setup_psr() - configure psr link
9850  * @stream: stream state
9851  *
9852  * Return: true if success
9853  */
9854 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9855 {
9856         struct dc_link *link = NULL;
9857         struct psr_config psr_config = {0};
9858         struct psr_context psr_context = {0};
9859         bool ret = false;
9860
9861         if (stream == NULL)
9862                 return false;
9863
9864         link = stream->link;
9865
9866         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9867
9868         if (psr_config.psr_version > 0) {
9869                 psr_config.psr_exit_link_training_required = 0x1;
9870                 psr_config.psr_frame_capture_indication_req = 0;
9871                 psr_config.psr_rfb_setup_time = 0x37;
9872                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9873                 psr_config.allow_smu_optimizations = 0x0;
9874
9875                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9876
9877         }
9878         DRM_DEBUG_DRIVER("PSR link: %d\n",      link->psr_settings.psr_feature_enabled);
9879
9880         return ret;
9881 }
9882
9883 /*
9884  * amdgpu_dm_psr_enable() - enable psr f/w
9885  * @stream: stream state
9886  *
9887  * Return: true if success
9888  */
9889 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9890 {
9891         struct dc_link *link = stream->link;
9892         unsigned int vsync_rate_hz = 0;
9893         struct dc_static_screen_params params = {0};
9894         /* Calculate number of static frames before generating interrupt to
9895          * enter PSR.
9896          */
9897         // Init fail safe of 2 frames static
9898         unsigned int num_frames_static = 2;
9899
9900         DRM_DEBUG_DRIVER("Enabling psr...\n");
9901
9902         vsync_rate_hz = div64_u64(div64_u64((
9903                         stream->timing.pix_clk_100hz * 100),
9904                         stream->timing.v_total),
9905                         stream->timing.h_total);
9906
9907         /* Round up
9908          * Calculate number of frames such that at least 30 ms of time has
9909          * passed.
9910          */
9911         if (vsync_rate_hz != 0) {
9912                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9913                 num_frames_static = (30000 / frame_time_microsec) + 1;
9914         }
9915
9916         params.triggers.cursor_update = true;
9917         params.triggers.overlay_update = true;
9918         params.triggers.surface_update = true;
9919         params.num_frames = num_frames_static;
9920
9921         dc_stream_set_static_screen_params(link->ctx->dc,
9922                                            &stream, 1,
9923                                            &params);
9924
9925         return dc_link_set_psr_allow_active(link, true, false, false);
9926 }
9927
9928 /*
9929  * amdgpu_dm_psr_disable() - disable psr f/w
9930  * @stream:  stream state
9931  *
9932  * Return: true if success
9933  */
9934 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9935 {
9936
9937         DRM_DEBUG_DRIVER("Disabling psr...\n");
9938
9939         return dc_link_set_psr_allow_active(stream->link, false, true, false);
9940 }
9941
9942 /*
9943  * amdgpu_dm_psr_disable() - disable psr f/w
9944  * if psr is enabled on any stream
9945  *
9946  * Return: true if success
9947  */
9948 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9949 {
9950         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9951         return dc_set_psr_allow_active(dm->dc, false);
9952 }
9953
9954 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9955 {
9956         struct amdgpu_device *adev = drm_to_adev(dev);
9957         struct dc *dc = adev->dm.dc;
9958         int i;
9959
9960         mutex_lock(&adev->dm.dc_lock);
9961         if (dc->current_state) {
9962                 for (i = 0; i < dc->current_state->stream_count; ++i)
9963                         dc->current_state->streams[i]
9964                                 ->triggered_crtc_reset.enabled =
9965                                 adev->dm.force_timing_sync;
9966
9967                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9968                 dc_trigger_sync(dc, dc->current_state);
9969         }
9970         mutex_unlock(&adev->dm.dc_lock);
9971 }
9972
/*
 * Register-write callback used by DC: writes @value to MMIO register
 * @address through CGS and records the access via the amdgpu_dc_wreg
 * tracepoint. @func_name is unused here; it is part of the dm_services
 * callback signature.
 */
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	/* Optional debug guard: address 0 is treated as an invalid write. */
	if (address == 0) {
		DC_ERR("invalid register write. address = 0");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}
9985
9986 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9987                           const char *func_name)
9988 {
9989         uint32_t value;
9990 #ifdef DM_CHECK_ADDR_0
9991         if (address == 0) {
9992                 DC_ERR("invalid register read; address = 0\n");
9993                 return 0;
9994         }
9995 #endif
9996
9997         if (ctx->dmub_srv &&
9998             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9999             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10000                 ASSERT(false);
10001                 return 0;
10002         }
10003
10004         value = cgs_read_register(ctx->cgs_device, address);
10005
10006         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10007
10008         return value;
10009 }