73e03a503b79471463f6db319a8be3ad1cf17fe4
[linux-2.6-block.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "amdgpu_dm_trace.h"
38
39 #include "vid.h"
40 #include "amdgpu.h"
41 #include "amdgpu_display.h"
42 #include "amdgpu_ucode.h"
43 #include "atom.h"
44 #include "amdgpu_dm.h"
45 #ifdef CONFIG_DRM_AMD_DC_HDCP
46 #include "amdgpu_dm_hdcp.h"
47 #include <drm/drm_hdcp.h>
48 #endif
49 #include "amdgpu_pm.h"
50
51 #include "amd_shared.h"
52 #include "amdgpu_dm_irq.h"
53 #include "dm_helpers.h"
54 #include "amdgpu_dm_mst_types.h"
55 #if defined(CONFIG_DEBUG_FS)
56 #include "amdgpu_dm_debugfs.h"
57 #endif
58
59 #include "ivsrcid/ivsrcid_vislands30.h"
60
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87
88 #include "soc15_common.h"
89 #endif
90
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
107
108 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
110
111 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
113
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
116
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
119
120 /**
121  * DOC: overview
122  *
123  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
125  * requests into DC requests, and DC responses into DRM responses.
126  *
127  * The root control structure is &struct amdgpu_display_manager.
128  */
129
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
133
134 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
135 {
136         switch (link->dpcd_caps.dongle_type) {
137         case DISPLAY_DONGLE_NONE:
138                 return DRM_MODE_SUBCONNECTOR_Native;
139         case DISPLAY_DONGLE_DP_VGA_CONVERTER:
140                 return DRM_MODE_SUBCONNECTOR_VGA;
141         case DISPLAY_DONGLE_DP_DVI_CONVERTER:
142         case DISPLAY_DONGLE_DP_DVI_DONGLE:
143                 return DRM_MODE_SUBCONNECTOR_DVID;
144         case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
145         case DISPLAY_DONGLE_DP_HDMI_DONGLE:
146                 return DRM_MODE_SUBCONNECTOR_HDMIA;
147         case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
148         default:
149                 return DRM_MODE_SUBCONNECTOR_Unknown;
150         }
151 }
152
153 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
154 {
155         struct dc_link *link = aconnector->dc_link;
156         struct drm_connector *connector = &aconnector->base;
157         enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
158
159         if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
160                 return;
161
162         if (aconnector->dc_sink)
163                 subconnector = get_subconnector_type(link);
164
165         drm_object_property_set_value(&connector->base,
166                         connector->dev->mode_config.dp_subconnector_property,
167                         subconnector);
168 }
169
170 /*
171  * initializes drm_device display related structures, based on the information
172  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
173  * drm_encoder, drm_mode_config
174  *
175  * Returns 0 on success
176  */
177 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
178 /* removes and deallocates the drm structures, created by the above function */
179 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
180
181 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
182                                 struct drm_plane *plane,
183                                 unsigned long possible_crtcs,
184                                 const struct dc_plane_cap *plane_cap);
185 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
186                                struct drm_plane *plane,
187                                uint32_t link_index);
188 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
189                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
190                                     uint32_t link_index,
191                                     struct amdgpu_encoder *amdgpu_encoder);
192 static int amdgpu_dm_encoder_init(struct drm_device *dev,
193                                   struct amdgpu_encoder *aencoder,
194                                   uint32_t link_index);
195
196 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
197
198 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
199
200 static int amdgpu_dm_atomic_check(struct drm_device *dev,
201                                   struct drm_atomic_state *state);
202
203 static void handle_cursor_update(struct drm_plane *plane,
204                                  struct drm_plane_state *old_plane_state);
205
206 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
207 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
208 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
209 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
211
212 static const struct drm_format_info *
213 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
214
215 /*
216  * dm_vblank_get_counter
217  *
218  * @brief
219  * Get counter for number of vertical blanks
220  *
221  * @param
222  * struct amdgpu_device *adev - [in] desired amdgpu device
223  * int disp_idx - [in] which CRTC to get the counter from
224  *
225  * @return
226  * Counter for vertical blanks
227  */
228 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
229 {
230         if (crtc >= adev->mode_info.num_crtc)
231                 return 0;
232         else {
233                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
234
235                 if (acrtc->dm_irq_params.stream == NULL) {
236                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
237                                   crtc);
238                         return 0;
239                 }
240
241                 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
242         }
243 }
244
245 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
246                                   u32 *vbl, u32 *position)
247 {
248         uint32_t v_blank_start, v_blank_end, h_position, v_position;
249
250         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
251                 return -EINVAL;
252         else {
253                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
254
255                 if (acrtc->dm_irq_params.stream ==  NULL) {
256                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
257                                   crtc);
258                         return 0;
259                 }
260
261                 /*
262                  * TODO rework base driver to use values directly.
263                  * for now parse it back into reg-format
264                  */
265                 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
266                                          &v_blank_start,
267                                          &v_blank_end,
268                                          &h_position,
269                                          &v_position);
270
271                 *position = v_position | (h_position << 16);
272                 *vbl = v_blank_start | (v_blank_end << 16);
273         }
274
275         return 0;
276 }
277
/* IP-block is_idle stub: DM tracks no idle state yet, so always report idle. */
static bool dm_is_idle(void *handle)
{
	(void)handle;

	/* XXX todo */
	return true;
}
283
/* IP-block wait_for_idle stub: nothing to wait on; always succeeds. */
static int dm_wait_for_idle(void *handle)
{
	(void)handle;

	/* XXX todo */
	return 0;
}
289
/* IP-block check_soft_reset stub: DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
	(void)handle;

	return false;
}
294
/* IP-block soft_reset stub: nothing to reset; always succeeds. */
static int dm_soft_reset(void *handle)
{
	(void)handle;

	/* XXX todo */
	return 0;
}
300
301 static struct amdgpu_crtc *
302 get_crtc_by_otg_inst(struct amdgpu_device *adev,
303                      int otg_inst)
304 {
305         struct drm_device *dev = adev_to_drm(adev);
306         struct drm_crtc *crtc;
307         struct amdgpu_crtc *amdgpu_crtc;
308
309         if (otg_inst == -1) {
310                 WARN_ON(1);
311                 return adev->mode_info.crtcs[0];
312         }
313
314         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
315                 amdgpu_crtc = to_amdgpu_crtc(crtc);
316
317                 if (amdgpu_crtc->otg_inst == otg_inst)
318                         return amdgpu_crtc;
319         }
320
321         return NULL;
322 }
323
324 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
325 {
326         return acrtc->dm_irq_params.freesync_config.state ==
327                        VRR_STATE_ACTIVE_VARIABLE ||
328                acrtc->dm_irq_params.freesync_config.state ==
329                        VRR_STATE_ACTIVE_FIXED;
330 }
331
332 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
333 {
334         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
335                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
336 }
337
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: the &struct common_irq_params registered for this
 *                    pageflip interrupt source (carries adev and irq_src)
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed. In VRR mode, while scanout is
 * still inside the front-porch, the completion event is queued for
 * later delivery by drm_crtc_handle_vblank() instead of being sent
 * immediately.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	/* event_lock protects pflip_status and the pending event pointer. */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
						 amdgpu_crtc->pflip_status,
						 AMDGPU_FLIP_SUBMITTED,
						 amdgpu_crtc->crtc_id,
						 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	/* A submitted flip without a queued event is unexpected; warn but
	 * keep going so pflip_status is still reset below.
	 */
	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
442
/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: the &struct common_irq_params registered for this
 *                    VUPDATE interrupt source (carries adev and irq_src)
 *
 * Runs after the end of front-porch. In VRR mode this is where core
 * vblank handling (and delivery of queued pageflip events) is done, and
 * where below-the-range (BTR) freesync processing happens for pre-DCE12
 * ASICs.
 */
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				/* vrr_params is shared with other IRQ paths:
				 * protect it with event_lock.
				 */
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
487
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler. Also drives CRC capture, VRR/BTR freesync updates on
 * Vega and newer, and delivers pending pageflip events when no planes
 * are active (HUBP may be clock-gated then, so the pageflip interrupt
 * would never fire).
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	/* event_lock protects vrr_params and the pending pageflip event. */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
568
569 static int dm_set_clockgating_state(void *handle,
570                   enum amd_clockgating_state state)
571 {
572         return 0;
573 }
574
575 static int dm_set_powergating_state(void *handle,
576                   enum amd_powergating_state state)
577 {
578         return 0;
579 }
580
581 /* Prototypes of private functions */
582 static int dm_early_init(void* handle);
583
584 /* Allocate memory for FBC compressed data  */
585 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
586 {
587         struct drm_device *dev = connector->dev;
588         struct amdgpu_device *adev = drm_to_adev(dev);
589         struct dm_compressor_info *compressor = &adev->dm.compressor;
590         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
591         struct drm_display_mode *mode;
592         unsigned long max_size = 0;
593
594         if (adev->dm.dc->fbc_compressor == NULL)
595                 return;
596
597         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
598                 return;
599
600         if (compressor->bo_ptr)
601                 return;
602
603
604         list_for_each_entry(mode, &connector->modes, head) {
605                 if (max_size < mode->htotal * mode->vtotal)
606                         max_size = mode->htotal * mode->vtotal;
607         }
608
609         if (max_size) {
610                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
611                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
612                             &compressor->gpu_addr, &compressor->cpu_addr);
613
614                 if (r)
615                         DRM_ERROR("DM: Failed to initialize FBC\n");
616                 else {
617                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
618                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
619                 }
620
621         }
622
623 }
624
625 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
626                                           int pipe, bool *enabled,
627                                           unsigned char *buf, int max_bytes)
628 {
629         struct drm_device *dev = dev_get_drvdata(kdev);
630         struct amdgpu_device *adev = drm_to_adev(dev);
631         struct drm_connector *connector;
632         struct drm_connector_list_iter conn_iter;
633         struct amdgpu_dm_connector *aconnector;
634         int ret = 0;
635
636         *enabled = false;
637
638         mutex_lock(&adev->dm.audio_lock);
639
640         drm_connector_list_iter_begin(dev, &conn_iter);
641         drm_for_each_connector_iter(connector, &conn_iter) {
642                 aconnector = to_amdgpu_dm_connector(connector);
643                 if (aconnector->audio_inst != port)
644                         continue;
645
646                 *enabled = true;
647                 ret = drm_eld_size(connector->eld);
648                 memcpy(buf, connector->eld, min(max_bytes, ret));
649
650                 break;
651         }
652         drm_connector_list_iter_end(&conn_iter);
653
654         mutex_unlock(&adev->dm.audio_lock);
655
656         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
657
658         return ret;
659 }
660
/* Ops table handed to the HDA audio driver so it can fetch ELDs from DM. */
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};
664
665 static int amdgpu_dm_audio_component_bind(struct device *kdev,
666                                        struct device *hda_kdev, void *data)
667 {
668         struct drm_device *dev = dev_get_drvdata(kdev);
669         struct amdgpu_device *adev = drm_to_adev(dev);
670         struct drm_audio_component *acomp = data;
671
672         acomp->ops = &amdgpu_dm_audio_component_ops;
673         acomp->dev = kdev;
674         adev->dm.audio_component = acomp;
675
676         return 0;
677 }
678
679 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
680                                           struct device *hda_kdev, void *data)
681 {
682         struct drm_device *dev = dev_get_drvdata(kdev);
683         struct amdgpu_device *adev = drm_to_adev(dev);
684         struct drm_audio_component *acomp = data;
685
686         acomp->ops = NULL;
687         acomp->dev = NULL;
688         adev->dm.audio_component = NULL;
689 }
690
/* Bind/unbind callbacks registered with the component framework. */
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};
695
696 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
697 {
698         int i, ret;
699
700         if (!amdgpu_audio)
701                 return 0;
702
703         adev->mode_info.audio.enabled = true;
704
705         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
706
707         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
708                 adev->mode_info.audio.pin[i].channels = -1;
709                 adev->mode_info.audio.pin[i].rate = -1;
710                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
711                 adev->mode_info.audio.pin[i].status_bits = 0;
712                 adev->mode_info.audio.pin[i].category_code = 0;
713                 adev->mode_info.audio.pin[i].connected = false;
714                 adev->mode_info.audio.pin[i].id =
715                         adev->dm.dc->res_pool->audios[i]->inst;
716                 adev->mode_info.audio.pin[i].offset = 0;
717         }
718
719         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
720         if (ret < 0)
721                 return ret;
722
723         adev->dm.audio_registered = true;
724
725         return 0;
726 }
727
728 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
729 {
730         if (!amdgpu_audio)
731                 return;
732
733         if (!adev->mode_info.audio.enabled)
734                 return;
735
736         if (adev->dm.audio_registered) {
737                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
738                 adev->dm.audio_registered = false;
739         }
740
741         /* TODO: Disable audio? */
742
743         adev->mode_info.audio.enabled = false;
744 }
745
746 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
747 {
748         struct drm_audio_component *acomp = adev->dm.audio_component;
749
750         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
751                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
752
753                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
754                                                  pin, -1);
755         }
756 }
757
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	/*
	 * Copy the DMUB (Display Micro-Controller Unit B) firmware and VBIOS
	 * into the pre-allocated framebuffer windows, then initialize the DMUB
	 * hardware and attach a DC-side DMUB service.
	 *
	 * Returns 0 on success or when the ASIC has no DMUB support;
	 * -EINVAL on missing prerequisites or DMUB service errors;
	 * -ENOMEM if the DC DMUB server cannot be allocated.
	 */
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	/*
	 * The inst_const region in the fw image is wrapped by a PSP header
	 * (and footer, accounted for in the size below); skip past it.
	 */
	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	/* BSS data immediately follows the inst_const payload. */
	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		/* Non-fatal: continue bring-up but warn. */
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
885
886 #if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	/*
	 * Mirror the GMC/MMHUB system aperture and GART page-table setup into
	 * DC's physical address space config so DC programs matching state.
	 *
	 * Unit conventions visible below: aperture bounds are carried in
	 * 256KB granules (>> 18 / << 18), AGP bounds in 16MB granules
	 * (>> 24 / << 24), page-table addresses in 4KB pages (>> 12 / << 12).
	 */
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	/* GART range split into 44-bit high / 32-bit low register halves. */
	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	/* Scale the granule-based values back up to byte addresses for DC. */
	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;

}
939 #endif
940
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	/*
	 * Top-level Display Manager bring-up: IRQ handling, Display Core
	 * (DC) creation, DMUB hardware init, freesync/color/HDCP modules,
	 * and finally the DRM-facing device state.
	 *
	 * Returns 0 on success, -EINVAL on any failure (after tearing down
	 * whatever was already initialized via amdgpu_dm_fini()).
	 */
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	/* Describe the ASIC to DC so it can select the matching resources. */
	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* APUs get GPU VM support; Green Sardine additionally skips DMCU. */
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	/* Translate module-parameter feature masks into DC init flags. */
	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	/* Apply debug-mask overrides onto the freshly created DC instance. */
	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* On APUs, hand the MMHUB aperture/GART layout over to DC. */
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	/* Freesync failure is non-fatal; only log it. */
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}


	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	/* amdgpu_dm_fini() must tolerate whatever was (not yet) set up. */
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
1118
1119 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1120 {
1121         int i;
1122
1123         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1124                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1125         }
1126
1127         amdgpu_dm_audio_fini(adev);
1128
1129         amdgpu_dm_destroy_drm_device(&adev->dm);
1130
1131 #ifdef CONFIG_DRM_AMD_DC_HDCP
1132         if (adev->dm.hdcp_workqueue) {
1133                 hdcp_destroy(adev->dm.hdcp_workqueue);
1134                 adev->dm.hdcp_workqueue = NULL;
1135         }
1136
1137         if (adev->dm.dc)
1138                 dc_deinit_callbacks(adev->dm.dc);
1139 #endif
1140         if (adev->dm.dc->ctx->dmub_srv) {
1141                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1142                 adev->dm.dc->ctx->dmub_srv = NULL;
1143         }
1144
1145         if (adev->dm.dmub_bo)
1146                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1147                                       &adev->dm.dmub_bo_gpu_addr,
1148                                       &adev->dm.dmub_bo_cpu_addr);
1149
1150         /* DC Destroy TODO: Replace destroy DAL */
1151         if (adev->dm.dc)
1152                 dc_destroy(&adev->dm.dc);
1153         /*
1154          * TODO: pageflip, vlank interrupt
1155          *
1156          * amdgpu_dm_irq_fini(adev);
1157          */
1158
1159         if (adev->dm.cgs_device) {
1160                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1161                 adev->dm.cgs_device = NULL;
1162         }
1163         if (adev->dm.freesync_module) {
1164                 mod_freesync_destroy(adev->dm.freesync_module);
1165                 adev->dm.freesync_module = NULL;
1166         }
1167
1168         mutex_destroy(&adev->dm.audio_lock);
1169         mutex_destroy(&adev->dm.dc_lock);
1170
1171         return;
1172 }
1173
1174 static int load_dmcu_fw(struct amdgpu_device *adev)
1175 {
1176         const char *fw_name_dmcu = NULL;
1177         int r;
1178         const struct dmcu_firmware_header_v1_0 *hdr;
1179
1180         switch(adev->asic_type) {
1181 #if defined(CONFIG_DRM_AMD_DC_SI)
1182         case CHIP_TAHITI:
1183         case CHIP_PITCAIRN:
1184         case CHIP_VERDE:
1185         case CHIP_OLAND:
1186 #endif
1187         case CHIP_BONAIRE:
1188         case CHIP_HAWAII:
1189         case CHIP_KAVERI:
1190         case CHIP_KABINI:
1191         case CHIP_MULLINS:
1192         case CHIP_TONGA:
1193         case CHIP_FIJI:
1194         case CHIP_CARRIZO:
1195         case CHIP_STONEY:
1196         case CHIP_POLARIS11:
1197         case CHIP_POLARIS10:
1198         case CHIP_POLARIS12:
1199         case CHIP_VEGAM:
1200         case CHIP_VEGA10:
1201         case CHIP_VEGA12:
1202         case CHIP_VEGA20:
1203         case CHIP_NAVI10:
1204         case CHIP_NAVI14:
1205         case CHIP_RENOIR:
1206         case CHIP_SIENNA_CICHLID:
1207         case CHIP_NAVY_FLOUNDER:
1208         case CHIP_DIMGREY_CAVEFISH:
1209         case CHIP_VANGOGH:
1210                 return 0;
1211         case CHIP_NAVI12:
1212                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1213                 break;
1214         case CHIP_RAVEN:
1215                 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1216                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1217                 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1218                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1219                 else
1220                         return 0;
1221                 break;
1222         default:
1223                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1224                 return -EINVAL;
1225         }
1226
1227         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1228                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1229                 return 0;
1230         }
1231
1232         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1233         if (r == -ENOENT) {
1234                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1235                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1236                 adev->dm.fw_dmcu = NULL;
1237                 return 0;
1238         }
1239         if (r) {
1240                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1241                         fw_name_dmcu);
1242                 return r;
1243         }
1244
1245         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1246         if (r) {
1247                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1248                         fw_name_dmcu);
1249                 release_firmware(adev->dm.fw_dmcu);
1250                 adev->dm.fw_dmcu = NULL;
1251                 return r;
1252         }
1253
1254         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1255         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1256         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1257         adev->firmware.fw_size +=
1258                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1259
1260         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1261         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1262         adev->firmware.fw_size +=
1263                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1264
1265         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1266
1267         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1268
1269         return 0;
1270 }
1271
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	/* DMUB register-read callback (installed via dmub_srv_create()):
	 * ctx is the amdgpu_device; delegate to the DC register accessor.
	 */
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}
1278
1279 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1280                                      uint32_t value)
1281 {
1282         struct amdgpu_device *adev = ctx;
1283
1284         return dm_write_reg(adev->dm.dc->ctx, address, value);
1285 }
1286
1287 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1288 {
1289         struct dmub_srv_create_params create_params;
1290         struct dmub_srv_region_params region_params;
1291         struct dmub_srv_region_info region_info;
1292         struct dmub_srv_fb_params fb_params;
1293         struct dmub_srv_fb_info *fb_info;
1294         struct dmub_srv *dmub_srv;
1295         const struct dmcub_firmware_header_v1_0 *hdr;
1296         const char *fw_name_dmub;
1297         enum dmub_asic dmub_asic;
1298         enum dmub_status status;
1299         int r;
1300
1301         switch (adev->asic_type) {
1302         case CHIP_RENOIR:
1303                 dmub_asic = DMUB_ASIC_DCN21;
1304                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1305                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1306                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1307                 break;
1308         case CHIP_SIENNA_CICHLID:
1309                 dmub_asic = DMUB_ASIC_DCN30;
1310                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1311                 break;
1312         case CHIP_NAVY_FLOUNDER:
1313                 dmub_asic = DMUB_ASIC_DCN30;
1314                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1315                 break;
1316         case CHIP_VANGOGH:
1317                 dmub_asic = DMUB_ASIC_DCN301;
1318                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1319                 break;
1320         case CHIP_DIMGREY_CAVEFISH:
1321                 dmub_asic = DMUB_ASIC_DCN302;
1322                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1323                 break;
1324
1325         default:
1326                 /* ASIC doesn't support DMUB. */
1327                 return 0;
1328         }
1329
1330         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1331         if (r) {
1332                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1333                 return 0;
1334         }
1335
1336         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1337         if (r) {
1338                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1339                 return 0;
1340         }
1341
1342         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1343
1344         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1345                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1346                         AMDGPU_UCODE_ID_DMCUB;
1347                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1348                         adev->dm.dmub_fw;
1349                 adev->firmware.fw_size +=
1350                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1351
1352                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1353                          adev->dm.dmcub_fw_version);
1354         }
1355
1356         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1357
1358         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1359         dmub_srv = adev->dm.dmub_srv;
1360
1361         if (!dmub_srv) {
1362                 DRM_ERROR("Failed to allocate DMUB service!\n");
1363                 return -ENOMEM;
1364         }
1365
1366         memset(&create_params, 0, sizeof(create_params));
1367         create_params.user_ctx = adev;
1368         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1369         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1370         create_params.asic = dmub_asic;
1371
1372         /* Create the DMUB service. */
1373         status = dmub_srv_create(dmub_srv, &create_params);
1374         if (status != DMUB_STATUS_OK) {
1375                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1376                 return -EINVAL;
1377         }
1378
1379         /* Calculate the size of all the regions for the DMUB service. */
1380         memset(&region_params, 0, sizeof(region_params));
1381
1382         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1383                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1384         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1385         region_params.vbios_size = adev->bios_size;
1386         region_params.fw_bss_data = region_params.bss_data_size ?
1387                 adev->dm.dmub_fw->data +
1388                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1389                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1390         region_params.fw_inst_const =
1391                 adev->dm.dmub_fw->data +
1392                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1393                 PSP_HEADER_BYTES;
1394
1395         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1396                                            &region_info);
1397
1398         if (status != DMUB_STATUS_OK) {
1399                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1400                 return -EINVAL;
1401         }
1402
1403         /*
1404          * Allocate a framebuffer based on the total size of all the regions.
1405          * TODO: Move this into GART.
1406          */
1407         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1408                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1409                                     &adev->dm.dmub_bo_gpu_addr,
1410                                     &adev->dm.dmub_bo_cpu_addr);
1411         if (r)
1412                 return r;
1413
1414         /* Rebase the regions on the framebuffer address. */
1415         memset(&fb_params, 0, sizeof(fb_params));
1416         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1417         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1418         fb_params.region_info = &region_info;
1419
1420         adev->dm.dmub_fb_info =
1421                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1422         fb_info = adev->dm.dmub_fb_info;
1423
1424         if (!fb_info) {
1425                 DRM_ERROR(
1426                         "Failed to allocate framebuffer info for DMUB service!\n");
1427                 return -ENOMEM;
1428         }
1429
1430         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1431         if (status != DMUB_STATUS_OK) {
1432                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1433                 return -EINVAL;
1434         }
1435
1436         return 0;
1437 }
1438
static int dm_sw_init(void *handle)
{
	/*
	 * IP-block sw_init hook: set up the DMUB service first, then fetch
	 * the DMCU firmware. Any failure is propagated to the caller.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = dm_dmub_sw_init(adev);

	return ret ? ret : load_dmcu_fw(adev);
}
1450
1451 static int dm_sw_fini(void *handle)
1452 {
1453         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1454
1455         kfree(adev->dm.dmub_fb_info);
1456         adev->dm.dmub_fb_info = NULL;
1457
1458         if (adev->dm.dmub_srv) {
1459                 dmub_srv_destroy(adev->dm.dmub_srv);
1460                 adev->dm.dmub_srv = NULL;
1461         }
1462
1463         release_firmware(adev->dm.dmub_fw);
1464         adev->dm.dmub_fw = NULL;
1465
1466         release_firmware(adev->dm.fw_dmcu);
1467         adev->dm.fw_dmcu = NULL;
1468
1469         return 0;
1470 }
1471
1472 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1473 {
1474         struct amdgpu_dm_connector *aconnector;
1475         struct drm_connector *connector;
1476         struct drm_connector_list_iter iter;
1477         int ret = 0;
1478
1479         drm_connector_list_iter_begin(dev, &iter);
1480         drm_for_each_connector_iter(connector, &iter) {
1481                 aconnector = to_amdgpu_dm_connector(connector);
1482                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1483                     aconnector->mst_mgr.aux) {
1484                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1485                                          aconnector,
1486                                          aconnector->base.base.id);
1487
1488                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1489                         if (ret < 0) {
1490                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1491                                 aconnector->dc_link->type =
1492                                         dc_connection_single;
1493                                 break;
1494                         }
1495                 }
1496         }
1497         drm_connector_list_iter_end(&iter);
1498
1499         return ret;
1500 }
1501
static int dm_late_init(void *handle)
{
	/*
	 * IP-block late_init hook: load the ABM (Adaptive Backlight
	 * Management) IRAM configuration into either the DMCU or, when ABM
	 * lives on DMUB, into the DMUB, then kick off MST link detection.
	 *
	 * Returns 0 on success, -EINVAL if the IRAM/ABM config load fails,
	 * or the result of detect_mst_link_for_all_connectors().
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	/* Identity (linear) backlight transfer curve over 16 points. */
	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	/*
	 * NOTE(review): params is not zero-initialized; fields other than the
	 * ones set below are assumed unread by dmcu_load_iram() /
	 * dmub_init_abm_config() — confirm against those implementations.
	 */
	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction,  Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}
1542
1543 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1544 {
1545         struct amdgpu_dm_connector *aconnector;
1546         struct drm_connector *connector;
1547         struct drm_connector_list_iter iter;
1548         struct drm_dp_mst_topology_mgr *mgr;
1549         int ret;
1550         bool need_hotplug = false;
1551
1552         drm_connector_list_iter_begin(dev, &iter);
1553         drm_for_each_connector_iter(connector, &iter) {
1554                 aconnector = to_amdgpu_dm_connector(connector);
1555                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1556                     aconnector->mst_port)
1557                         continue;
1558
1559                 mgr = &aconnector->mst_mgr;
1560
1561                 if (suspend) {
1562                         drm_dp_mst_topology_mgr_suspend(mgr);
1563                 } else {
1564                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1565                         if (ret < 0) {
1566                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1567                                 need_hotplug = true;
1568                         }
1569                 }
1570         }
1571         drm_connector_list_iter_end(&iter);
1572
1573         if (need_hotplug)
1574                 drm_kms_helper_hotplug_event(dev);
1575 }
1576
1577 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1578 {
1579         struct smu_context *smu = &adev->smu;
1580         int ret = 0;
1581
1582         if (!is_support_sw_smu(adev))
1583                 return 0;
1584
1585         /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1586          * on window driver dc implementation.
1587          * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1588          * should be passed to smu during boot up and resume from s3.
1589          * boot up: dc calculate dcn watermark clock settings within dc_create,
1590          * dcn20_resource_construct
1591          * then call pplib functions below to pass the settings to smu:
1592          * smu_set_watermarks_for_clock_ranges
1593          * smu_set_watermarks_table
1594          * navi10_set_watermarks_table
1595          * smu_write_watermarks_table
1596          *
1597          * For Renoir, clock settings of dcn watermark are also fixed values.
1598          * dc has implemented different flow for window driver:
1599          * dc_hardware_init / dc_set_power_state
1600          * dcn10_init_hw
1601          * notify_wm_ranges
1602          * set_wm_ranges
1603          * -- Linux
1604          * smu_set_watermarks_for_clock_ranges
1605          * renoir_set_watermarks_table
1606          * smu_write_watermarks_table
1607          *
1608          * For Linux,
1609          * dc_hardware_init -> amdgpu_dm_init
1610          * dc_set_power_state --> dm_resume
1611          *
1612          * therefore, this function apply to navi10/12/14 but not Renoir
1613          * *
1614          */
1615         switch(adev->asic_type) {
1616         case CHIP_NAVI10:
1617         case CHIP_NAVI14:
1618         case CHIP_NAVI12:
1619                 break;
1620         default:
1621                 return 0;
1622         }
1623
1624         ret = smu_write_watermarks_table(smu);
1625         if (ret) {
1626                 DRM_ERROR("Failed to update WMTABLE!\n");
1627                 return ret;
1628         }
1629
1630         return 0;
1631 }
1632
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	/* Bring up the DAL display manager, then enable hotplug detection. */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
1662
/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;

	/* Reverse order of dm_hw_init(): HPD first, then IRQs, then DM/DC. */
	amdgpu_dm_hpd_fini(adev);
	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);

	return 0;
}
1681
1682
1683 static int dm_enable_vblank(struct drm_crtc *crtc);
1684 static void dm_disable_vblank(struct drm_crtc *crtc);
1685
1686 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1687                                  struct dc_state *state, bool enable)
1688 {
1689         enum dc_irq_source irq_source;
1690         struct amdgpu_crtc *acrtc;
1691         int rc = -EBUSY;
1692         int i = 0;
1693
1694         for (i = 0; i < state->stream_count; i++) {
1695                 acrtc = get_crtc_by_otg_inst(
1696                                 adev, state->stream_status[i].primary_otg_inst);
1697
1698                 if (acrtc && state->stream_status[i].plane_count != 0) {
1699                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1700                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1701                         DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1702                                   acrtc->crtc_id, enable ? "en" : "dis", rc);
1703                         if (rc)
1704                                 DRM_WARN("Failed to %s pflip interrupts\n",
1705                                          enable ? "enable" : "disable");
1706
1707                         if (enable) {
1708                                 rc = dm_enable_vblank(&acrtc->base);
1709                                 if (rc)
1710                                         DRM_WARN("Failed to enable vblank interrupts\n");
1711                         } else {
1712                                 dm_disable_vblank(&acrtc->base);
1713                         }
1714
1715                 }
1716         }
1717
1718 }
1719
1720 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1721 {
1722         struct dc_state *context = NULL;
1723         enum dc_status res = DC_ERROR_UNEXPECTED;
1724         int i;
1725         struct dc_stream_state *del_streams[MAX_PIPES];
1726         int del_streams_count = 0;
1727
1728         memset(del_streams, 0, sizeof(del_streams));
1729
1730         context = dc_create_state(dc);
1731         if (context == NULL)
1732                 goto context_alloc_fail;
1733
1734         dc_resource_state_copy_construct_current(dc, context);
1735
1736         /* First remove from context all streams */
1737         for (i = 0; i < context->stream_count; i++) {
1738                 struct dc_stream_state *stream = context->streams[i];
1739
1740                 del_streams[del_streams_count++] = stream;
1741         }
1742
1743         /* Remove all planes for removed streams and then remove the streams */
1744         for (i = 0; i < del_streams_count; i++) {
1745                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1746                         res = DC_FAIL_DETACH_SURFACES;
1747                         goto fail;
1748                 }
1749
1750                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1751                 if (res != DC_OK)
1752                         goto fail;
1753         }
1754
1755
1756         res = dc_validate_global_state(dc, context, false);
1757
1758         if (res != DC_OK) {
1759                 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1760                 goto fail;
1761         }
1762
1763         res = dc_commit_state(dc, context);
1764
1765 fail:
1766         dc_release_state(context);
1767
1768 context_alloc_fail:
1769         return res;
1770 }
1771
/*
 * dm_suspend() - amd_ip_funcs suspend hook for the display manager.
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Two paths: the GPU-reset path caches the current DC state and quiesces the
 * display, while the ordinary S3 path saves the atomic state, suspends MST,
 * and powers DC down to D3.
 */
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		/* NOTE: dc_lock is intentionally left held when we return
		 * from this branch; the matching mutex_unlock() is in the
		 * reset branch of dm_resume(), so the lock spans the whole
		 * GPU reset. */
		mutex_lock(&dm->dc_lock);

#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_allow_idle_optimizations(adev->dm.dc, false);
#endif

		/* Snapshot the live DC state so dm_resume() can replay it. */
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		/* Commit an empty state to quiesce all pipes before reset. */
		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

	/* Ordinary S3 suspend: save the DRM atomic state for resume. */
	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	/* Tear down MST topology managers before the links go away. */
	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);


	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}
1808
1809 static struct amdgpu_dm_connector *
1810 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1811                                              struct drm_crtc *crtc)
1812 {
1813         uint32_t i;
1814         struct drm_connector_state *new_con_state;
1815         struct drm_connector *connector;
1816         struct drm_crtc *crtc_from_state;
1817
1818         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1819                 crtc_from_state = new_con_state->crtc;
1820
1821                 if (crtc_from_state == crtc)
1822                         return to_amdgpu_dm_connector(connector);
1823         }
1824
1825         return NULL;
1826 }
1827
/*
 * emulated_link_detect() - Fake a sink on a link whose physical detection
 * failed (e.g. connector forced on via sysfs with nothing attached).
 * @link: the dc_link to populate with an emulated local sink
 *
 * Marks the link as disconnected, creates a dc_sink matching the connector's
 * signal type, installs it as the link's local sink, and attempts an EDID
 * read through the emulated path.
 */
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	/* NOTE(review): prev_sink is retained here but never released in
	 * this function — looks like a leaked reference on repeated forced
	 * detects; confirm whether the old sink's ownership is transferred
	 * elsewhere. */
	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	/* Map the connector's signal type to DDC transaction type and sink
	 * signal for the emulated sink. */
	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		/* DP is deliberately emulated as a virtual sink (no real
		 * link training possible) — presumably intentional; verify
		 * against dc sink handling before changing. */
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	/* EDID failure is non-fatal: the emulated sink stays installed. */
	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}
1909
1910 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1911                                      struct amdgpu_display_manager *dm)
1912 {
1913         struct {
1914                 struct dc_surface_update surface_updates[MAX_SURFACES];
1915                 struct dc_plane_info plane_infos[MAX_SURFACES];
1916                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1917                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1918                 struct dc_stream_update stream_update;
1919         } * bundle;
1920         int k, m;
1921
1922         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1923
1924         if (!bundle) {
1925                 dm_error("Failed to allocate update bundle\n");
1926                 goto cleanup;
1927         }
1928
1929         for (k = 0; k < dc_state->stream_count; k++) {
1930                 bundle->stream_update.stream = dc_state->streams[k];
1931
1932                 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1933                         bundle->surface_updates[m].surface =
1934                                 dc_state->stream_status->plane_states[m];
1935                         bundle->surface_updates[m].surface->force_full_update =
1936                                 true;
1937                 }
1938                 dc_commit_updates_for_stream(
1939                         dm->dc, bundle->surface_updates,
1940                         dc_state->stream_status->plane_count,
1941                         dc_state->streams[k], &bundle->stream_update, dc_state);
1942         }
1943
1944 cleanup:
1945         kfree(bundle);
1946
1947         return;
1948 }
1949
1950 static void dm_set_dpms_off(struct dc_link *link)
1951 {
1952         struct dc_stream_state *stream_state;
1953         struct amdgpu_dm_connector *aconnector = link->priv;
1954         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1955         struct dc_stream_update stream_update;
1956         bool dpms_off = true;
1957
1958         memset(&stream_update, 0, sizeof(stream_update));
1959         stream_update.dpms_off = &dpms_off;
1960
1961         mutex_lock(&adev->dm.dc_lock);
1962         stream_state = dc_stream_find_from_link(link);
1963
1964         if (stream_state == NULL) {
1965                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
1966                 mutex_unlock(&adev->dm.dc_lock);
1967                 return;
1968         }
1969
1970         stream_update.stream = stream_state;
1971         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
1972                                      stream_state, &stream_update,
1973                                      stream_state->ctx->dc->current_state);
1974         mutex_unlock(&adev->dm.dc_lock);
1975 }
1976
1977 static int dm_resume(void *handle)
1978 {
1979         struct amdgpu_device *adev = handle;
1980         struct drm_device *ddev = adev_to_drm(adev);
1981         struct amdgpu_display_manager *dm = &adev->dm;
1982         struct amdgpu_dm_connector *aconnector;
1983         struct drm_connector *connector;
1984         struct drm_connector_list_iter iter;
1985         struct drm_crtc *crtc;
1986         struct drm_crtc_state *new_crtc_state;
1987         struct dm_crtc_state *dm_new_crtc_state;
1988         struct drm_plane *plane;
1989         struct drm_plane_state *new_plane_state;
1990         struct dm_plane_state *dm_new_plane_state;
1991         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1992         enum dc_connection_type new_connection_type = dc_connection_none;
1993         struct dc_state *dc_state;
1994         int i, r, j;
1995
1996         if (amdgpu_in_reset(adev)) {
1997                 dc_state = dm->cached_dc_state;
1998
1999                 r = dm_dmub_hw_init(adev);
2000                 if (r)
2001                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2002
2003                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2004                 dc_resume(dm->dc);
2005
2006                 amdgpu_dm_irq_resume_early(adev);
2007
2008                 for (i = 0; i < dc_state->stream_count; i++) {
2009                         dc_state->streams[i]->mode_changed = true;
2010                         for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2011                                 dc_state->stream_status->plane_states[j]->update_flags.raw
2012                                         = 0xffffffff;
2013                         }
2014                 }
2015
2016                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2017
2018                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2019
2020                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2021
2022                 dc_release_state(dm->cached_dc_state);
2023                 dm->cached_dc_state = NULL;
2024
2025                 amdgpu_dm_irq_resume_late(adev);
2026
2027                 mutex_unlock(&dm->dc_lock);
2028
2029                 return 0;
2030         }
2031         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2032         dc_release_state(dm_state->context);
2033         dm_state->context = dc_create_state(dm->dc);
2034         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2035         dc_resource_state_construct(dm->dc, dm_state->context);
2036
2037         /* Before powering on DC we need to re-initialize DMUB. */
2038         r = dm_dmub_hw_init(adev);
2039         if (r)
2040                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2041
2042         /* power on hardware */
2043         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2044
2045         /* program HPD filter */
2046         dc_resume(dm->dc);
2047
2048         /*
2049          * early enable HPD Rx IRQ, should be done before set mode as short
2050          * pulse interrupts are used for MST
2051          */
2052         amdgpu_dm_irq_resume_early(adev);
2053
2054         /* On resume we need to rewrite the MSTM control bits to enable MST*/
2055         s3_handle_mst(ddev, false);
2056
2057         /* Do detection*/
2058         drm_connector_list_iter_begin(ddev, &iter);
2059         drm_for_each_connector_iter(connector, &iter) {
2060                 aconnector = to_amdgpu_dm_connector(connector);
2061
2062                 /*
2063                  * this is the case when traversing through already created
2064                  * MST connectors, should be skipped
2065                  */
2066                 if (aconnector->mst_port)
2067                         continue;
2068
2069                 mutex_lock(&aconnector->hpd_lock);
2070                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2071                         DRM_ERROR("KMS: Failed to detect connector\n");
2072
2073                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2074                         emulated_link_detect(aconnector->dc_link);
2075                 else
2076                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2077
2078                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2079                         aconnector->fake_enable = false;
2080
2081                 if (aconnector->dc_sink)
2082                         dc_sink_release(aconnector->dc_sink);
2083                 aconnector->dc_sink = NULL;
2084                 amdgpu_dm_update_connector_after_detect(aconnector);
2085                 mutex_unlock(&aconnector->hpd_lock);
2086         }
2087         drm_connector_list_iter_end(&iter);
2088
2089         /* Force mode set in atomic commit */
2090         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2091                 new_crtc_state->active_changed = true;
2092
2093         /*
2094          * atomic_check is expected to create the dc states. We need to release
2095          * them here, since they were duplicated as part of the suspend
2096          * procedure.
2097          */
2098         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2099                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2100                 if (dm_new_crtc_state->stream) {
2101                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2102                         dc_stream_release(dm_new_crtc_state->stream);
2103                         dm_new_crtc_state->stream = NULL;
2104                 }
2105         }
2106
2107         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2108                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2109                 if (dm_new_plane_state->dc_state) {
2110                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2111                         dc_plane_state_release(dm_new_plane_state->dc_state);
2112                         dm_new_plane_state->dc_state = NULL;
2113                 }
2114         }
2115
2116         drm_atomic_helper_resume(ddev, dm->cached_state);
2117
2118         dm->cached_state = NULL;
2119
2120         amdgpu_dm_irq_resume_late(adev);
2121
2122         amdgpu_dm_smu_write_watermarks_table(adev);
2123
2124         return 0;
2125 }
2126
2127 /**
2128  * DOC: DM Lifecycle
2129  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2131  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2132  * the base driver's device list to be initialized and torn down accordingly.
2133  *
2134  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2135  */
2136
/* amd_ip_funcs vtable registering the display manager with the amdgpu IP
 * block framework; these hooks drive DM's lifecycle (init, suspend/resume,
 * power/clock gating). */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
2154
2155 const struct amdgpu_ip_block_version dm_ip_block =
2156 {
2157         .type = AMD_IP_BLOCK_TYPE_DCE,
2158         .major = 1,
2159         .minor = 0,
2160         .rev = 0,
2161         .funcs = &amdgpu_dm_funcs,
2162 };
2163
2164
2165 /**
2166  * DOC: atomic
2167  *
2168  * *WIP*
2169  */
2170
/* DRM mode-config vtable: framebuffer creation, output polling, and the
 * atomic check/commit entry points for the whole device. */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.get_format_info = amd_get_format_info,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};
2178
/* Routes the atomic commit tail into DM's implementation. */
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
2182
/*
 * update_connector_ext_caps() - Refresh eDP backlight capabilities from the
 * sink's extended DPCD caps and HDR metadata.
 * @aconnector: the DM connector to inspect (only eDP links are handled)
 *
 * Decides whether AUX backlight control is supported and derives the
 * max/min input signal levels from the sink's max_cll/min_cll values.
 */
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	/* round(50 * 2**(r/32)) for r in 0..31 — see derivation below. */
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	/* AUX backlight control only applies to embedded DisplayPort. */
	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	/* Any of these DPCD bits means the panel can be driven over AUX. */
	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * For calculating this expression we may need float point precision;
	 * to avoid this complexity level, we take advantage that CV is divided
	 * by a constant. From the Euclids division algorithm, we know that CV
	 * can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * We just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expressions can be verified at
	 * pre_computed_values.
	 */
	/* CV is one byte per CTA-861-G, so q = max_cll >> 5 <= 7 and the
	 * shift below cannot overflow — assumes max_cll <= 255; verify. */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	/* NOTE(review): DIV_ROUND_CLOSEST(min_cll, 255) rounds CV/255 to 0
	 * or 1, so for min_cll < 128 this yields min = 0 — the squared ratio
	 * loses all precision in integer math. Confirm whether a fixed-point
	 * computation was intended here. */
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}
2242
/*
 * amdgpu_dm_update_connector_after_detect() - Sync DRM connector state with
 * the result of a dc_link detection.
 * @aconnector: the connector whose dc_sink / EDID / freesync caps to update
 *
 * Handles four cases: forced-EDID (emulated) connectors, MST sinks (left to
 * the MST framework), a short-pulse re-detect where the sink is unchanged,
 * and a genuine connect/disconnect. Carefully balances dc_sink references:
 * the local reference taken at the top is always released before returning.
 */
void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	/* Take a local reference so the sink cannot vanish under us. */
	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			/* No physical sink: fall back to the emulated one. */
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			/* Sink present but no EDID: clear CEC/EDID state. */
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			/* Publish the sink's EDID to DRM and CEC. */
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			drm_add_edid_modes(connector, aconnector->edid);

			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		/* Disconnect: drop EDID, freesync caps, and the old sink. */
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	update_subconnector_property(aconnector);

	if (sink)
		dc_sink_release(sink);
}
2385
/*
 * HPD (long pulse) interrupt handler for a single connector.
 *
 * Re-runs sink detection on the link and, when the connection state
 * changed, restores the DRM connector state and sends a hotplug event to
 * userspace.  Registered per-connector via register_hpd_handlers() and
 * invoked in low-IRQ (deferred) context.
 *
 * @param: the struct amdgpu_dm_connector this HPD source belongs to.
 */
static void handle_hpd_irq(void *param)
{
        struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
        enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
#endif

        /*
         * In case of failure or MST no need to update connector status or notify the OS
         * since (for MST case) MST does this in its own context.
         */
        mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
        /*
         * Reset the display's HDCP state on every HPD and flag the
         * connector so encryption can be re-negotiated after the
         * (dis)connect.
         */
        if (adev->dm.hdcp_workqueue) {
                hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
                dm_con_state->update_hdcp = true;
        }
#endif
        if (aconnector->fake_enable)
                aconnector->fake_enable = false;

        if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
                DRM_ERROR("KMS: Failed to detect connector\n");

        /* Forced connector with no physical sink: emulate link detection. */
        if (aconnector->base.force && new_connection_type == dc_connection_none) {
                emulated_link_detect(aconnector->dc_link);


                drm_modeset_lock_all(dev);
                dm_restore_drm_connector_state(dev, connector);
                drm_modeset_unlock_all(dev);

                /* Only notify userspace when the state is not forced. */
                if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
                        drm_kms_helper_hotplug_event(dev);

        } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
                /* Turn DPMS off for links that went from none to none. */
                if (new_connection_type == dc_connection_none &&
                    aconnector->dc_link->type == dc_connection_none)
                        dm_set_dpms_off(aconnector->dc_link);

                amdgpu_dm_update_connector_after_detect(aconnector);

                drm_modeset_lock_all(dev);
                dm_restore_drm_connector_state(dev, connector);
                drm_modeset_unlock_all(dev);

                if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
                        drm_kms_helper_hotplug_event(dev);
        }
        mutex_unlock(&aconnector->hpd_lock);

}
2443
/*
 * Drain the DP sink's ESI (event status indicator) DPCD registers and let
 * the MST topology manager process pending short-pulse events.
 *
 * Each event the MST manager reports as handled is acknowledged by writing
 * the ESI bytes back to the sink (retried up to 3 times), after which the
 * registers are re-read in case a new event arrived.  The loop is bounded
 * by max_process_count to avoid spinning on a misbehaving sink.
 */
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
        uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
        uint8_t dret;
        bool new_irq_handled = false;
        int dpcd_addr;
        int dpcd_bytes_to_read;

        const int max_process_count = 30;
        int process_count = 0;

        const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

        /* Pre-1.2 DPCD sinks expose the IRQ vector at the legacy address. */
        if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
                dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
                /* DPCD 0x200 - 0x201 for downstream IRQ */
                dpcd_addr = DP_SINK_COUNT;
        } else {
                dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
                /* DPCD 0x2002 - 0x2005 for downstream IRQ */
                dpcd_addr = DP_SINK_COUNT_ESI;
        }

        dret = drm_dp_dpcd_read(
                &aconnector->dm_dp_aux.aux,
                dpcd_addr,
                esi,
                dpcd_bytes_to_read);

        /* Loop while full reads succeed and events keep being handled. */
        while (dret == dpcd_bytes_to_read &&
                process_count < max_process_count) {
                uint8_t retry;
                dret = 0;

                process_count++;

                DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
                /* handle HPD short pulse irq */
                if (aconnector->mst_mgr.mst_state)
                        drm_dp_mst_hpd_irq(
                                &aconnector->mst_mgr,
                                esi,
                                &new_irq_handled);

                if (new_irq_handled) {
                        /* ACK at DPCD to notify down stream */
                        const int ack_dpcd_bytes_to_write =
                                dpcd_bytes_to_read - 1;

                        /* The ack skips esi[0]; retry the write up to 3 times. */
                        for (retry = 0; retry < 3; retry++) {
                                uint8_t wret;

                                wret = drm_dp_dpcd_write(
                                        &aconnector->dm_dp_aux.aux,
                                        dpcd_addr + 1,
                                        &esi[1],
                                        ack_dpcd_bytes_to_write);
                                if (wret == ack_dpcd_bytes_to_write)
                                        break;
                        }

                        /* check if there is new irq to be handled */
                        dret = drm_dp_dpcd_read(
                                &aconnector->dm_dp_aux.aux,
                                dpcd_addr,
                                esi,
                                dpcd_bytes_to_read);

                        new_irq_handled = false;
                } else {
                        break;
                }
        }

        if (process_count == max_process_count)
                DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
2521
/*
 * HPD-RX (DP short pulse) interrupt handler for a single connector.
 *
 * Reads the sink's HPD IRQ data, services MST up/down request events via
 * dm_handle_hpd_rx_irq(), and otherwise forwards the IRQ data to DC.  If
 * the downstream port status changed on a non-MST connector, a full
 * re-detect is performed and userspace is notified with a hotplug event.
 * CP_IRQ events are forwarded to the HDCP workqueue when built in.
 *
 * @param: the struct amdgpu_dm_connector this HPD-RX source belongs to.
 */
static void handle_hpd_rx_irq(void *param)
{
        struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
        struct dc_link *dc_link = aconnector->dc_link;
        bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
        bool result = false;
        enum dc_connection_type new_connection_type = dc_connection_none;
        struct amdgpu_device *adev = drm_to_adev(dev);
        union hpd_irq_data hpd_irq_data;

        memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));

        /*
         * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
         * conflict, after implement i2c helper, this mutex should be
         * retired.
         */
        if (dc_link->type != dc_connection_mst_branch)
                mutex_lock(&aconnector->hpd_lock);

        read_hpd_rx_irq_data(dc_link, &hpd_irq_data);

        /* Service MST sideband messages before generic DC handling. */
        if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
                (dc_link->type == dc_connection_mst_branch)) {
                if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
                        result = true;
                        dm_handle_hpd_rx_irq(aconnector);
                        goto out;
                } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
                        result = false;
                        dm_handle_hpd_rx_irq(aconnector);
                        goto out;
                }
        }

        /* Let DC process the IRQ data; it reports whether status changed. */
        mutex_lock(&adev->dm.dc_lock);
#ifdef CONFIG_DRM_AMD_DC_HDCP
        result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
#else
        result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
#endif
        mutex_unlock(&adev->dm.dc_lock);

out:
        if (result && !is_mst_root_connector) {
                /* Downstream Port status changed. */
                if (!dc_link_detect_sink(dc_link, &new_connection_type))
                        DRM_ERROR("KMS: Failed to detect connector\n");

                /* Forced connector with no sink: emulate link detection. */
                if (aconnector->base.force && new_connection_type == dc_connection_none) {
                        emulated_link_detect(dc_link);

                        if (aconnector->fake_enable)
                                aconnector->fake_enable = false;

                        amdgpu_dm_update_connector_after_detect(aconnector);


                        drm_modeset_lock_all(dev);
                        dm_restore_drm_connector_state(dev, connector);
                        drm_modeset_unlock_all(dev);

                        drm_kms_helper_hotplug_event(dev);
                } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

                        if (aconnector->fake_enable)
                                aconnector->fake_enable = false;

                        amdgpu_dm_update_connector_after_detect(aconnector);


                        drm_modeset_lock_all(dev);
                        dm_restore_drm_connector_state(dev, connector);
                        drm_modeset_unlock_all(dev);

                        drm_kms_helper_hotplug_event(dev);
                }
        }
#ifdef CONFIG_DRM_AMD_DC_HDCP
        /* Content-protection IRQ: hand off to the HDCP workqueue. */
        if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
                if (adev->dm.hdcp_workqueue)
                        hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
        }
#endif

        /* The hpd_lock was only taken for non-MST links above. */
        if (dc_link->type != dc_connection_mst_branch) {
                drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
                mutex_unlock(&aconnector->hpd_lock);
        }
}
2614
2615 static void register_hpd_handlers(struct amdgpu_device *adev)
2616 {
2617         struct drm_device *dev = adev_to_drm(adev);
2618         struct drm_connector *connector;
2619         struct amdgpu_dm_connector *aconnector;
2620         const struct dc_link *dc_link;
2621         struct dc_interrupt_params int_params = {0};
2622
2623         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2624         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2625
2626         list_for_each_entry(connector,
2627                         &dev->mode_config.connector_list, head) {
2628
2629                 aconnector = to_amdgpu_dm_connector(connector);
2630                 dc_link = aconnector->dc_link;
2631
2632                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2633                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2634                         int_params.irq_source = dc_link->irq_source_hpd;
2635
2636                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2637                                         handle_hpd_irq,
2638                                         (void *) aconnector);
2639                 }
2640
2641                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2642
2643                         /* Also register for DP short pulse (hpd_rx). */
2644                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2645                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2646
2647                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2648                                         handle_hpd_rx_irq,
2649                                         (void *) aconnector);
2650                 }
2651         }
2652 }
2653
2654 #if defined(CONFIG_DRM_AMD_DC_SI)
2655 /* Register IRQ sources and initialize IRQ callbacks */
/*
 * DCE 6 variant: hooks up vblank, page-flip and HPD interrupt sources.
 * Returns 0 on success or the error from amdgpu_irq_add_id().
 */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
        struct dc *dc = adev->dm.dc;
        struct common_irq_params *c_irq_params;
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;
        unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

        /*
         * Actions of amdgpu_irq_add_id():
         * 1. Register a set() function with base driver.
         *    Base driver will call set() function to enable/disable an
         *    interrupt in DC hardware.
         * 2. Register amdgpu_dm_irq_handler().
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
         *    for acknowledging and handling. */

        /* Use VBLANK interrupt */
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                /* Here the srcid of crtc i's vblank interrupt is (i + 1). */
                r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
                if (r) {
                        DRM_ERROR("Failed to add crtc irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i+1 , 0);

                /* vblank_params is indexed relative to DC_IRQ_SOURCE_VBLANK1. */
                c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_crtc_high_irq, c_irq_params);
        }

        /* Use GRPH_PFLIP interrupt */
        for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
                        i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
                r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
                if (r) {
                        DRM_ERROR("Failed to add page flip irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                /* pflip_params is indexed relative to DC_IRQ_SOURCE_PFLIP_FIRST. */
                c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_pflip_high_irq, c_irq_params);

        }

        /* HPD */
        r = amdgpu_irq_add_id(adev, client_id,
                        VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
        if (r) {
                DRM_ERROR("Failed to add hpd irq id!\n");
                return r;
        }

        register_hpd_handlers(adev);

        return 0;
}
2735 #endif
2736
2737 /* Register IRQ sources and initialize IRQ callbacks */
/*
 * DCE 8/10/11/12 variant: hooks up vblank, vupdate, page-flip and HPD
 * interrupt sources.  Vega and newer route DCE interrupts through the
 * SOC15 interrupt client.  Returns 0 on success or the error from
 * amdgpu_irq_add_id().
 */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
        struct dc *dc = adev->dm.dc;
        struct common_irq_params *c_irq_params;
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;
        unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

        if (adev->asic_type >= CHIP_VEGA10)
                client_id = SOC15_IH_CLIENTID_DCE;

        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

        /*
         * Actions of amdgpu_irq_add_id():
         * 1. Register a set() function with base driver.
         *    Base driver will call set() function to enable/disable an
         *    interrupt in DC hardware.
         * 2. Register amdgpu_dm_irq_handler().
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
         *    for acknowledging and handling. */

        /* Use VBLANK interrupt */
        for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
                r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
                if (r) {
                        DRM_ERROR("Failed to add crtc irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                /* vblank_params is indexed relative to DC_IRQ_SOURCE_VBLANK1. */
                c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_crtc_high_irq, c_irq_params);
        }

        /* Use VUPDATE interrupt */
        for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
                r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
                if (r) {
                        DRM_ERROR("Failed to add vupdate irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                /* vupdate_params is indexed relative to DC_IRQ_SOURCE_VUPDATE1. */
                c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_vupdate_high_irq, c_irq_params);
        }

        /* Use GRPH_PFLIP interrupt */
        for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
                        i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
                r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
                if (r) {
                        DRM_ERROR("Failed to add page flip irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                /* pflip_params is indexed relative to DC_IRQ_SOURCE_PFLIP_FIRST. */
                c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_pflip_high_irq, c_irq_params);

        }

        /* HPD */
        r = amdgpu_irq_add_id(adev, client_id,
                        VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
        if (r) {
                DRM_ERROR("Failed to add hpd irq id!\n");
                return r;
        }

        register_hpd_handlers(adev);

        return 0;
}
2841
2842 #if defined(CONFIG_DRM_AMD_DC_DCN)
2843 /* Register IRQ sources and initialize IRQ callbacks */
/*
 * DCN variant: hooks up VSTARTUP (used as vblank), VUPDATE_NO_LOCK,
 * page-flip and HPD interrupt sources through the SOC15 DCE interrupt
 * client.  Returns 0 on success or the error from amdgpu_irq_add_id().
 */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
        struct dc *dc = adev->dm.dc;
        struct common_irq_params *c_irq_params;
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;

        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

        /*
         * Actions of amdgpu_irq_add_id():
         * 1. Register a set() function with base driver.
         *    Base driver will call set() function to enable/disable an
         *    interrupt in DC hardware.
         * 2. Register amdgpu_dm_irq_handler().
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
         *    for acknowledging and handling.
         */

        /* Use VSTARTUP interrupt */
        for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
                        i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
                        i++) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

                if (r) {
                        DRM_ERROR("Failed to add crtc irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                /* vblank_params is indexed relative to DC_IRQ_SOURCE_VBLANK1. */
                c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(
                        adev, &int_params, dm_crtc_high_irq, c_irq_params);
        }

        /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
         * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
         * to trigger at end of each vblank, regardless of state of the lock,
         * matching DCE behaviour.
         */
        for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
             i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
             i++) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

                if (r) {
                        DRM_ERROR("Failed to add vupdate irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                /* vupdate_params is indexed relative to DC_IRQ_SOURCE_VUPDATE1. */
                c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_vupdate_high_irq, c_irq_params);
        }

        /* Use GRPH_PFLIP interrupt */
        for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
                        i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
                        i++) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
                if (r) {
                        DRM_ERROR("Failed to add page flip irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                /* pflip_params is indexed relative to DC_IRQ_SOURCE_PFLIP_FIRST. */
                c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_pflip_high_irq, c_irq_params);

        }

        /* HPD */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
                        &adev->hpd_irq);
        if (r) {
                DRM_ERROR("Failed to add hpd irq id!\n");
                return r;
        }

        register_hpd_handlers(adev);

        return 0;
}
2955 #endif
2956
2957 /*
2958  * Acquires the lock for the atomic state object and returns
2959  * the new atomic state.
2960  *
2961  * This should only be called during atomic check.
2962  */
2963 static int dm_atomic_get_state(struct drm_atomic_state *state,
2964                                struct dm_atomic_state **dm_state)
2965 {
2966         struct drm_device *dev = state->dev;
2967         struct amdgpu_device *adev = drm_to_adev(dev);
2968         struct amdgpu_display_manager *dm = &adev->dm;
2969         struct drm_private_state *priv_state;
2970
2971         if (*dm_state)
2972                 return 0;
2973
2974         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2975         if (IS_ERR(priv_state))
2976                 return PTR_ERR(priv_state);
2977
2978         *dm_state = to_dm_atomic_state(priv_state);
2979
2980         return 0;
2981 }
2982
2983 static struct dm_atomic_state *
2984 dm_atomic_get_new_state(struct drm_atomic_state *state)
2985 {
2986         struct drm_device *dev = state->dev;
2987         struct amdgpu_device *adev = drm_to_adev(dev);
2988         struct amdgpu_display_manager *dm = &adev->dm;
2989         struct drm_private_obj *obj;
2990         struct drm_private_state *new_obj_state;
2991         int i;
2992
2993         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2994                 if (obj->funcs == dm->atomic_obj.funcs)
2995                         return to_dm_atomic_state(new_obj_state);
2996         }
2997
2998         return NULL;
2999 }
3000
3001 static struct drm_private_state *
3002 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3003 {
3004         struct dm_atomic_state *old_state, *new_state;
3005
3006         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3007         if (!new_state)
3008                 return NULL;
3009
3010         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3011
3012         old_state = to_dm_atomic_state(obj->state);
3013
3014         if (old_state && old_state->context)
3015                 new_state->context = dc_copy_state(old_state->context);
3016
3017         if (!new_state->context) {
3018                 kfree(new_state);
3019                 return NULL;
3020         }
3021
3022         return &new_state->base;
3023 }
3024
3025 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3026                                     struct drm_private_state *state)
3027 {
3028         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3029
3030         if (dm_state && dm_state->context)
3031                 dc_release_state(dm_state->context);
3032
3033         kfree(dm_state);
3034 }
3035
/* Callbacks for the DM private atomic object holding the global DC state. */
static struct drm_private_state_funcs dm_atomic_state_funcs = {
        .atomic_duplicate_state = dm_atomic_duplicate_state,
        .atomic_destroy_state = dm_atomic_destroy_state,
};
3040
3041 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3042 {
3043         struct dm_atomic_state *state;
3044         int r;
3045
3046         adev->mode_info.mode_config_initialized = true;
3047
3048         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3049         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3050
3051         adev_to_drm(adev)->mode_config.max_width = 16384;
3052         adev_to_drm(adev)->mode_config.max_height = 16384;
3053
3054         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3055         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3056         /* indicates support for immediate flip */
3057         adev_to_drm(adev)->mode_config.async_page_flip = true;
3058
3059         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3060
3061         state = kzalloc(sizeof(*state), GFP_KERNEL);
3062         if (!state)
3063                 return -ENOMEM;
3064
3065         state->context = dc_create_state(adev->dm.dc);
3066         if (!state->context) {
3067                 kfree(state);
3068                 return -ENOMEM;
3069         }
3070
3071         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3072
3073         drm_atomic_private_obj_init(adev_to_drm(adev),
3074                                     &adev->dm.atomic_obj,
3075                                     &state->base,
3076                                     &dm_atomic_state_funcs);
3077
3078         r = amdgpu_display_modeset_create_props(adev);
3079         if (r) {
3080                 dc_release_state(state->context);
3081                 kfree(state);
3082                 return r;
3083         }
3084
3085         r = amdgpu_dm_audio_init(adev);
3086         if (r) {
3087                 dc_release_state(state->context);
3088                 kfree(state);
3089                 return r;
3090         }
3091
3092         return 0;
3093 }
3094
3095 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3096 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3097 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3098
3099 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3100         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3101
3102 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3103 {
3104 #if defined(CONFIG_ACPI)
3105         struct amdgpu_dm_backlight_caps caps;
3106
3107         memset(&caps, 0, sizeof(caps));
3108
3109         if (dm->backlight_caps.caps_valid)
3110                 return;
3111
3112         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3113         if (caps.caps_valid) {
3114                 dm->backlight_caps.caps_valid = true;
3115                 if (caps.aux_support)
3116                         return;
3117                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3118                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3119         } else {
3120                 dm->backlight_caps.min_input_signal =
3121                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3122                 dm->backlight_caps.max_input_signal =
3123                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3124         }
3125 #else
3126         if (dm->backlight_caps.aux_support)
3127                 return;
3128
3129         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3130         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3131 #endif
3132 }
3133
3134 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3135 {
3136         bool rc;
3137
3138         if (!link)
3139                 return 1;
3140
3141         rc = dc_link_set_backlight_level_nits(link, true, brightness,
3142                                               AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3143
3144         return rc ? 0 : 1;
3145 }
3146
3147 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3148                                 unsigned *min, unsigned *max)
3149 {
3150         if (!caps)
3151                 return 0;
3152
3153         if (caps->aux_support) {
3154                 // Firmware limits are in nits, DC API wants millinits.
3155                 *max = 1000 * caps->aux_max_input_signal;
3156                 *min = 1000 * caps->aux_min_input_signal;
3157         } else {
3158                 // Firmware limits are 8-bit, PWM control is 16-bit.
3159                 *max = 0x101 * caps->max_input_signal;
3160                 *min = 0x101 * caps->min_input_signal;
3161         }
3162         return 1;
3163 }
3164
3165 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3166                                         uint32_t brightness)
3167 {
3168         unsigned min, max;
3169
3170         if (!get_brightness_range(caps, &min, &max))
3171                 return brightness;
3172
3173         // Rescale 0..255 to min..max
3174         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3175                                        AMDGPU_MAX_BL_LEVEL);
3176 }
3177
3178 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3179                                       uint32_t brightness)
3180 {
3181         unsigned min, max;
3182
3183         if (!get_brightness_range(caps, &min, &max))
3184                 return brightness;
3185
3186         if (brightness < min)
3187                 return 0;
3188         // Rescale min..max to 0..255
3189         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3190                                  max - min);
3191 }
3192
3193 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3194 {
3195         struct amdgpu_display_manager *dm = bl_get_data(bd);
3196         struct amdgpu_dm_backlight_caps caps;
3197         struct dc_link *link = NULL;
3198         u32 brightness;
3199         bool rc;
3200
3201         amdgpu_dm_update_backlight_caps(dm);
3202         caps = dm->backlight_caps;
3203
3204         link = (struct dc_link *)dm->backlight_link;
3205
3206         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3207         // Change brightness based on AUX property
3208         if (caps.aux_support)
3209                 return set_backlight_via_aux(link, brightness);
3210
3211         rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3212
3213         return rc ? 0 : 1;
3214 }
3215
3216 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3217 {
3218         struct amdgpu_display_manager *dm = bl_get_data(bd);
3219         int ret = dc_link_get_backlight_level(dm->backlight_link);
3220
3221         if (ret == DC_ERROR_UNEXPECTED)
3222                 return bd->props.brightness;
3223         return convert_brightness_to_user(&dm->backlight_caps, ret);
3224 }
3225
/*
 * Backlight class hooks.  BL_CORE_SUSPENDRESUME asks the backlight core to
 * handle suspend/resume for this device.
 */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
        .options = BL_CORE_SUSPENDRESUME,
        .get_brightness = amdgpu_dm_backlight_get_brightness,
        .update_status  = amdgpu_dm_backlight_update_status,
};
3231
3232 static void
3233 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3234 {
3235         char bl_name[16];
3236         struct backlight_properties props = { 0 };
3237
3238         amdgpu_dm_update_backlight_caps(dm);
3239
3240         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3241         props.brightness = AMDGPU_MAX_BL_LEVEL;
3242         props.type = BACKLIGHT_RAW;
3243
3244         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3245                  adev_to_drm(dm->adev)->primary->index);
3246
3247         dm->backlight_dev = backlight_device_register(bl_name,
3248                                                       adev_to_drm(dm->adev)->dev,
3249                                                       dm,
3250                                                       &amdgpu_dm_backlight_ops,
3251                                                       &props);
3252
3253         if (IS_ERR(dm->backlight_dev))
3254                 DRM_ERROR("DM: Backlight registration failed!\n");
3255         else
3256                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3257 }
3258
3259 #endif
3260
3261 static int initialize_plane(struct amdgpu_display_manager *dm,
3262                             struct amdgpu_mode_info *mode_info, int plane_id,
3263                             enum drm_plane_type plane_type,
3264                             const struct dc_plane_cap *plane_cap)
3265 {
3266         struct drm_plane *plane;
3267         unsigned long possible_crtcs;
3268         int ret = 0;
3269
3270         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3271         if (!plane) {
3272                 DRM_ERROR("KMS: Failed to allocate plane\n");
3273                 return -ENOMEM;
3274         }
3275         plane->type = plane_type;
3276
3277         /*
3278          * HACK: IGT tests expect that the primary plane for a CRTC
3279          * can only have one possible CRTC. Only expose support for
3280          * any CRTC if they're not going to be used as a primary plane
3281          * for a CRTC - like overlay or underlay planes.
3282          */
3283         possible_crtcs = 1 << plane_id;
3284         if (plane_id >= dm->dc->caps.max_streams)
3285                 possible_crtcs = 0xff;
3286
3287         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3288
3289         if (ret) {
3290                 DRM_ERROR("KMS: Failed to initialize plane\n");
3291                 kfree(plane);
3292                 return ret;
3293         }
3294
3295         if (mode_info)
3296                 mode_info->planes[plane_id] = plane;
3297
3298         return ret;
3299 }
3300
3301
/*
 * Hook a detected link up to the backlight device.  Only internal panels
 * (eDP/LVDS) with something actually connected qualify.
 */
static void register_backlight_device(struct amdgpu_display_manager *dm,
                                      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

        if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
            link->type != dc_connection_none) {
                /*
                 * Even if registration failed, we should continue with
                 * DM initialization because not having a backlight control
                 * is better than a black screen.
                 */
                amdgpu_dm_register_backlight_device(dm);

                /* Only remember the link if the device actually exists. */
                if (dm->backlight_dev)
                        dm->backlight_link = link;
        }
#endif
}
3322
3323
3324 /*
3325  * In this architecture, the association
3326  * connector -> encoder -> crtc
3327  * id not really requried. The crtc and connector will hold the
3328  * display_index as an abstraction to use with DAL component
3329  *
3330  * Returns 0 on success
3331  */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
        struct amdgpu_display_manager *dm = &adev->dm;
        int32_t i;
        struct amdgpu_dm_connector *aconnector = NULL;
        struct amdgpu_encoder *aencoder = NULL;
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        uint32_t link_cnt;
        int32_t primary_planes;
        enum dc_connection_type new_connection_type = dc_connection_none;
        const struct dc_plane_cap *plane;

        dm->display_indexes_num = dm->dc->caps.max_streams;
        /* Update the actual used number of crtc */
        adev->mode_info.num_crtc = adev->dm.display_indexes_num;

        link_cnt = dm->dc->caps.max_links;
        if (amdgpu_dm_mode_config_init(dm->adev)) {
                DRM_ERROR("DM: Failed to initialize mode config\n");
                return -EINVAL;
        }

        /* There is one primary plane per CRTC */
        primary_planes = dm->dc->caps.max_streams;
        ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

        /*
         * Initialize primary planes, implicit planes for legacy IOCTLS.
         * Order is reversed to match iteration order in atomic check.
         */
        for (i = (primary_planes - 1); i >= 0; i--) {
                plane = &dm->dc->caps.planes[i];

                if (initialize_plane(dm, mode_info, i,
                                     DRM_PLANE_TYPE_PRIMARY, plane)) {
                        DRM_ERROR("KMS: Failed to initialize primary plane\n");
                        goto fail;
                }
        }

        /*
         * Initialize overlay planes, index starting after primary planes.
         * These planes have a higher DRM index than the primary planes since
         * they should be considered as having a higher z-order.
         * Order is reversed to match iteration order in atomic check.
         *
         * Only support DCN for now, and only expose one so we don't encourage
         * userspace to use up all the pipes.
         */
        for (i = 0; i < dm->dc->caps.max_planes; ++i) {
                struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

                /* The overlay must be a universal DCN plane that can blend
                 * both ways and supports ARGB8888. */
                if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
                        continue;

                if (!plane->blends_with_above || !plane->blends_with_below)
                        continue;

                if (!plane->pixel_format_support.argb8888)
                        continue;

                if (initialize_plane(dm, NULL, primary_planes + i,
                                     DRM_PLANE_TYPE_OVERLAY, plane)) {
                        DRM_ERROR("KMS: Failed to initialize overlay plane\n");
                        goto fail;
                }

                /* Only create one overlay plane. */
                break;
        }

        /* One CRTC per stream, each paired with its primary plane. */
        for (i = 0; i < dm->dc->caps.max_streams; i++)
                if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
                        DRM_ERROR("KMS: Failed to initialize crtc\n");
                        goto fail;
                }

        /* loops over all connectors on the board */
        for (i = 0; i < link_cnt; i++) {
                struct dc_link *link = NULL;

                if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
                        DRM_ERROR(
                                "KMS: Cannot support more than %d display indexes\n",
                                        AMDGPU_DM_MAX_DISPLAY_INDEX);
                        continue;
                }

                aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
                if (!aconnector)
                        goto fail;

                aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
                if (!aencoder)
                        goto fail;

                if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
                        DRM_ERROR("KMS: Failed to initialize encoder\n");
                        goto fail;
                }

                if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
                        DRM_ERROR("KMS: Failed to initialize connector\n");
                        goto fail;
                }

                link = dc_get_link_at_index(dm->dc, i);

                if (!dc_link_detect_sink(link, &new_connection_type))
                        DRM_ERROR("KMS: Failed to detect connector\n");

                /* User-forced connector with no physical sink: emulate the
                 * link instead of running a real detect. */
                if (aconnector->base.force && new_connection_type == dc_connection_none) {
                        emulated_link_detect(link);
                        amdgpu_dm_update_connector_after_detect(aconnector);

                } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
                        amdgpu_dm_update_connector_after_detect(aconnector);
                        register_backlight_device(dm, link);
                        if (amdgpu_dc_feature_mask & DC_PSR_MASK)
                                amdgpu_dm_set_psr_caps(link);
                }


        }

        /* Software is initialized. Now we can register interrupt handlers. */
        switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        case CHIP_OLAND:
                if (dce60_register_irq_handlers(dm->adev)) {
                        DRM_ERROR("DM: Failed to initialize IRQ\n");
                        goto fail;
                }
                break;
#endif
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
                if (dce110_register_irq_handlers(dm->adev)) {
                        DRM_ERROR("DM: Failed to initialize IRQ\n");
                        goto fail;
                }
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_RAVEN:
        case CHIP_NAVI12:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_RENOIR:
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
        case CHIP_DIMGREY_CAVEFISH:
        case CHIP_VANGOGH:
                if (dcn10_register_irq_handlers(dm->adev)) {
                        DRM_ERROR("DM: Failed to initialize IRQ\n");
                        goto fail;
                }
                break;
#endif
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                goto fail;
        }

        return 0;
fail:
        /* Frees the connector/encoder pair from the failed iteration, if
         * any; earlier, successfully initialized objects are torn down with
         * the mode config. */
        kfree(aencoder);
        kfree(aconnector);

        return -EINVAL;
}
3519
3520 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3521 {
3522         drm_mode_config_cleanup(dm->ddev);
3523         drm_atomic_private_obj_fini(&dm->atomic_obj);
3524         return;
3525 }
3526
3527 /******************************************************************************
3528  * amdgpu_display_funcs functions
3529  *****************************************************************************/
3530
3531 /*
3532  * dm_bandwidth_update - program display watermarks
3533  *
3534  * @adev: amdgpu_device pointer
3535  *
3536  * Calculate and program the display watermarks and line buffer allocation.
3537  */
3538 static void dm_bandwidth_update(struct amdgpu_device *adev)
3539 {
3540         /* TODO: implement later */
3541 }
3542
/*
 * Display function table installed as adev->mode_info.funcs by
 * dm_early_init().  Hooks left NULL are handled by DC/DAL internally.
 */
static const struct amdgpu_display_funcs dm_display_funcs = {
        .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
        .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
        .backlight_set_level = NULL, /* never called for DC */
        .backlight_get_level = NULL, /* never called for DC */
        .hpd_sense = NULL,/* called unconditionally */
        .hpd_set_polarity = NULL, /* called unconditionally */
        .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
        .page_flip_get_scanoutpos =
                dm_crtc_get_scanoutpos,/* called unconditionally */
        .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
        .add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
3556
3557 #if defined(CONFIG_DEBUG_KERNEL_DC)
3558
/*
 * Debug-only sysfs hook (write-only "s3_debug" attribute): writing a
 * nonzero value fakes a resume (dm_resume + hotplug event), zero fakes a
 * suspend (dm_suspend).  Returns @count on success, 0 on parse failure.
 */
static ssize_t s3_debug_store(struct device *device,
                              struct device_attribute *attr,
                              const char *buf,
                              size_t count)
{
        int ret;
        int s3_state;
        struct drm_device *drm_dev = dev_get_drvdata(device);
        struct amdgpu_device *adev = drm_to_adev(drm_dev);

        ret = kstrtoint(buf, 0, &s3_state);

        if (ret == 0) {
                if (s3_state) {
                        dm_resume(adev);
                        drm_kms_helper_hotplug_event(adev_to_drm(adev));
                } else
                        dm_suspend(adev);
        }

        return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);
3583
3584 #endif
3585
/*
 * IP-block early-init: set the per-ASIC CRTC/HPD/DIG counts, install the
 * IRQ funcs and (if unset) the display function table.  Returns -EINVAL
 * for ASICs DC does not support.
 */
static int dm_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_OLAND:
                adev->mode_info.num_crtc = 2;
                adev->mode_info.num_hpd = 2;
                adev->mode_info.num_dig = 2;
                break;
#endif
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_KAVERI:
                adev->mode_info.num_crtc = 4;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 7;
                break;
        case CHIP_KABINI:
        case CHIP_MULLINS:
                adev->mode_info.num_crtc = 2;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_FIJI:
        case CHIP_TONGA:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 7;
                break;
        case CHIP_CARRIZO:
                adev->mode_info.num_crtc = 3;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                break;
        case CHIP_STONEY:
                adev->mode_info.num_crtc = 2;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                break;
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:
                adev->mode_info.num_crtc = 5;
                adev->mode_info.num_hpd = 5;
                adev->mode_info.num_dig = 5;
                break;
        case CHIP_POLARIS10:
        case CHIP_VEGAM:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_RAVEN:
        case CHIP_RENOIR:
        case CHIP_VANGOGH:
                adev->mode_info.num_crtc = 4;
                adev->mode_info.num_hpd = 4;
                adev->mode_info.num_dig = 4;
                break;
        case CHIP_NAVI10:
        case CHIP_NAVI12:
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_NAVI14:
        case CHIP_DIMGREY_CAVEFISH:
                adev->mode_info.num_crtc = 5;
                adev->mode_info.num_hpd = 5;
                adev->mode_info.num_dig = 5;
                break;
#endif
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        amdgpu_dm_set_irq_funcs(adev);

        if (adev->mode_info.funcs == NULL)
                adev->mode_info.funcs = &dm_display_funcs;

        /*
         * Note: Do NOT change adev->audio_endpt_rreg and
         * adev->audio_endpt_wreg because they are initialised in
         * amdgpu_device_init()
         */
#if defined(CONFIG_DEBUG_KERNEL_DC)
        device_create_file(
                adev_to_drm(adev)->dev,
                &dev_attr_s3_debug);
#endif

        return 0;
}
3703
3704 static bool modeset_required(struct drm_crtc_state *crtc_state,
3705                              struct dc_stream_state *new_stream,
3706                              struct dc_stream_state *old_stream)
3707 {
3708         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3709 }
3710
3711 static bool modereset_required(struct drm_crtc_state *crtc_state)
3712 {
3713         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3714 }
3715
/* .destroy hook: release the DRM encoder core state and free our wrapper. */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
        drm_encoder_cleanup(encoder);
        kfree(encoder);
}
3721
/* Encoder vtable: only destruction needs driver involvement. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
        .destroy = amdgpu_dm_encoder_destroy,
};
3725
3726
3727 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3728                                          struct drm_framebuffer *fb,
3729                                          int *min_downscale, int *max_upscale)
3730 {
3731         struct amdgpu_device *adev = drm_to_adev(dev);
3732         struct dc *dc = adev->dm.dc;
3733         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3734         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3735
3736         switch (fb->format->format) {
3737         case DRM_FORMAT_P010:
3738         case DRM_FORMAT_NV12:
3739         case DRM_FORMAT_NV21:
3740                 *max_upscale = plane_cap->max_upscale_factor.nv12;
3741                 *min_downscale = plane_cap->max_downscale_factor.nv12;
3742                 break;
3743
3744         case DRM_FORMAT_XRGB16161616F:
3745         case DRM_FORMAT_ARGB16161616F:
3746         case DRM_FORMAT_XBGR16161616F:
3747         case DRM_FORMAT_ABGR16161616F:
3748                 *max_upscale = plane_cap->max_upscale_factor.fp16;
3749                 *min_downscale = plane_cap->max_downscale_factor.fp16;
3750                 break;
3751
3752         default:
3753                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3754                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3755                 break;
3756         }
3757
3758         /*
3759          * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
3760          * scaling factor of 1.0 == 1000 units.
3761          */
3762         if (*max_upscale == 1)
3763                 *max_upscale = 1000;
3764
3765         if (*min_downscale == 1)
3766                 *min_downscale = 1000;
3767 }
3768
3769
/*
 * Translate a DRM plane state into a DC scaling_info (src/dst/clip rects)
 * and validate the implied scale factors against the DC plane caps.
 * Returns 0 on success, -EINVAL for degenerate rects or unsupported scales.
 */
static int fill_dc_scaling_info(const struct drm_plane_state *state,
                                struct dc_scaling_info *scaling_info)
{
        int scale_w, scale_h, min_downscale, max_upscale;

        memset(scaling_info, 0, sizeof(*scaling_info));

        /* Source is fixed 16.16 but we ignore mantissa for now... */
        scaling_info->src_rect.x = state->src_x >> 16;
        scaling_info->src_rect.y = state->src_y >> 16;

        scaling_info->src_rect.width = state->src_w >> 16;
        if (scaling_info->src_rect.width == 0)
                return -EINVAL;

        scaling_info->src_rect.height = state->src_h >> 16;
        if (scaling_info->src_rect.height == 0)
                return -EINVAL;

        scaling_info->dst_rect.x = state->crtc_x;
        scaling_info->dst_rect.y = state->crtc_y;

        if (state->crtc_w == 0)
                return -EINVAL;

        scaling_info->dst_rect.width = state->crtc_w;

        if (state->crtc_h == 0)
                return -EINVAL;

        scaling_info->dst_rect.height = state->crtc_h;

        /* DRM doesn't specify clipping on destination output. */
        scaling_info->clip_rect = scaling_info->dst_rect;

        /* Validate scaling per-format with DC plane caps */
        if (state->plane && state->plane->dev && state->fb) {
                get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
                                             &min_downscale, &max_upscale);
        } else {
                /* No fb to inspect: fall back to fixed x0.25..x16 limits. */
                min_downscale = 250;
                max_upscale = 16000;
        }

        /* Scale factors below are in units of 1/1000 of the source size. */
        scale_w = scaling_info->dst_rect.width * 1000 /
                  scaling_info->src_rect.width;

        if (scale_w < min_downscale || scale_w > max_upscale)
                return -EINVAL;

        scale_h = scaling_info->dst_rect.height * 1000 /
                  scaling_info->src_rect.height;

        if (scale_h < min_downscale || scale_h > max_upscale)
                return -EINVAL;

        /*
         * The "scaling_quality" can be ignored for now, quality = 0 has DC
         * assume reasonable defaults based on the format.
         */

        return 0;
}
3833
/*
 * Derive GFX8-and-older tiling parameters from the AMDGPU_TILING_* flags
 * stored on the BO and record them in @tiling_info.
 */
static void
fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
                                 uint64_t tiling_flags)
{
        /* Fill GFX8 params */
        if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
                unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

                /* 2D macro-tiled: pull bank/aspect/split geometry from flags. */
                bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
                bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
                mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
                tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
                num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

                /* XXX fix me for VI */
                tiling_info->gfx8.num_banks = num_banks;
                tiling_info->gfx8.array_mode =
                                DC_ARRAY_2D_TILED_THIN1;
                tiling_info->gfx8.tile_split = tile_split;
                tiling_info->gfx8.bank_width = bankw;
                tiling_info->gfx8.bank_height = bankh;
                tiling_info->gfx8.tile_aspect = mtaspect;
                tiling_info->gfx8.tile_mode =
                                DC_ADDR_SURF_MICRO_TILING_DISPLAY;
        } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
                        == DC_ARRAY_1D_TILED_THIN1) {
                tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
        }

        /* Pipe config is set regardless of the array mode (linear included). */
        tiling_info->gfx8.pipe_config =
                        AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}
3866
/*
 * Seed GFX9+ tiling info from the device's gb_addr_config: pipe, bank,
 * shader-engine counts and related geometry.  num_pkrs is only set for
 * Sienna Cichlid / Navy Flounder / Dimgrey Cavefish / Vangogh.
 */
static void
fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
                                  union dc_tiling_info *tiling_info)
{
        tiling_info->gfx9.num_pipes =
                adev->gfx.config.gb_addr_config_fields.num_pipes;
        tiling_info->gfx9.num_banks =
                adev->gfx.config.gb_addr_config_fields.num_banks;
        tiling_info->gfx9.pipe_interleave =
                adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
        tiling_info->gfx9.num_shader_engines =
                adev->gfx.config.gb_addr_config_fields.num_se;
        tiling_info->gfx9.max_compressed_frags =
                adev->gfx.config.gb_addr_config_fields.max_compress_frags;
        tiling_info->gfx9.num_rb_per_se =
                adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
        tiling_info->gfx9.shaderEnable = 1;
        if (adev->asic_type == CHIP_SIENNA_CICHLID ||
            adev->asic_type == CHIP_NAVY_FLOUNDER ||
            adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
            adev->asic_type == CHIP_VANGOGH)
                tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}
3890
/*
 * Ask DC whether the requested DCC configuration is supported for this
 * surface.  Returns 0 when DCC is disabled or supported, -EINVAL when the
 * format or DC capabilities cannot satisfy the request.
 */
static int
validate_dcc(struct amdgpu_device *adev,
             const enum surface_pixel_format format,
             const enum dc_rotation_angle rotation,
             const union dc_tiling_info *tiling_info,
             const struct dc_plane_dcc_param *dcc,
             const struct dc_plane_address *address,
             const struct plane_size *plane_size)
{
        struct dc *dc = adev->dm.dc;
        struct dc_dcc_surface_param input;
        struct dc_surface_dcc_cap output;

        memset(&input, 0, sizeof(input));
        memset(&output, 0, sizeof(output));

        /* Nothing to validate when DCC is not requested. */
        if (!dcc->enable)
                return 0;

        /* Video formats are rejected, and DC must expose the query hook. */
        if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
            !dc->cap_funcs.get_dcc_compression_cap)
                return -EINVAL;

        input.format = format;
        input.surface_size.width = plane_size->surface_size.width;
        input.surface_size.height = plane_size->surface_size.height;
        input.swizzle_mode = tiling_info->gfx9.swizzle;

        /* Scan direction follows the rotation of the surface. */
        if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
                input.scan = SCAN_DIRECTION_HORIZONTAL;
        else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
                input.scan = SCAN_DIRECTION_VERTICAL;

        if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
                return -EINVAL;

        if (!output.capable)
                return -EINVAL;

        /* HW that requires independent 64B blocks can't take a layout
         * without them. */
        if (dcc->independent_64b_blks == 0 &&
            output.grph.rgb.independent_64b_blks != 0)
                return -EINVAL;

        return 0;
}
3936
3937 static bool
3938 modifier_has_dcc(uint64_t modifier)
3939 {
3940         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3941 }
3942
3943 static unsigned
3944 modifier_gfx9_swizzle_mode(uint64_t modifier)
3945 {
3946         if (modifier == DRM_FORMAT_MOD_LINEAR)
3947                 return 0;
3948
3949         return AMD_FMT_MOD_GET(TILE, modifier);
3950 }
3951
/* Look up DRM format info for the fb command, modifier taken into account. */
static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
        return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}
3957
3958 static void
3959 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3960                                     union dc_tiling_info *tiling_info,
3961                                     uint64_t modifier)
3962 {
3963         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3964         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3965         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3966         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3967
3968         fill_gfx9_tiling_info_from_device(adev, tiling_info);
3969
3970         if (!IS_AMD_FMT_MOD(modifier))
3971                 return;
3972
3973         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3974         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
3975
3976         if (adev->family >= AMDGPU_FAMILY_NV) {
3977                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
3978         } else {
3979                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3980
3981                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
3982         }
3983 }
3984
/*
 * Micro-tile class of a GFX9+ swizzle mode; corresponds to the low two
 * bits of the swizzle mode value (callers mask with & 3).
 */
enum dm_micro_swizzle {
	MICRO_SWIZZLE_Z = 0,	/* _Z (depth) swizzles */
	MICRO_SWIZZLE_S = 1,	/* _S (standard) swizzles */
	MICRO_SWIZZLE_D = 2,	/* _D (display) swizzles */
	MICRO_SWIZZLE_R = 3	/* _R swizzles */
};
3991
3992 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
3993                                           uint32_t format,
3994                                           uint64_t modifier)
3995 {
3996         struct amdgpu_device *adev = drm_to_adev(plane->dev);
3997         const struct drm_format_info *info = drm_format_info(format);
3998
3999         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4000
4001         if (!info)
4002                 return false;
4003
4004         /*
4005          * We always have to allow this modifier, because core DRM still
4006          * checks LINEAR support if userspace does not provide modifers.
4007          */
4008         if (modifier == DRM_FORMAT_MOD_LINEAR)
4009                 return true;
4010
4011         /*
4012          * The arbitrary tiling support for multiplane formats has not been hooked
4013          * up.
4014          */
4015         if (info->num_planes > 1)
4016                 return false;
4017
4018         /*
4019          * For D swizzle the canonical modifier depends on the bpp, so check
4020          * it here.
4021          */
4022         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4023             adev->family >= AMDGPU_FAMILY_NV) {
4024                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4025                         return false;
4026         }
4027
4028         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4029             info->cpp[0] < 8)
4030                 return false;
4031
4032         if (modifier_has_dcc(modifier)) {
4033                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4034                 if (info->cpp[0] != 4)
4035                         return false;
4036         }
4037
4038         return true;
4039 }
4040
4041 static void
4042 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4043 {
4044         if (!*mods)
4045                 return;
4046
4047         if (*cap - *size < 1) {
4048                 uint64_t new_cap = *cap * 2;
4049                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4050
4051                 if (!new_mods) {
4052                         kfree(*mods);
4053                         *mods = NULL;
4054                         return;
4055                 }
4056
4057                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4058                 kfree(*mods);
4059                 *mods = new_mods;
4060                 *cap = new_cap;
4061         }
4062
4063         (*mods)[*size] = mod;
4064         *size += 1;
4065 }
4066
/*
 * Advertise the GFX9 (Vega/Raven) swizzle modes as format modifiers,
 * listed roughly best-first.  DCC and _S_X variants are only emitted for
 * the RV (Raven) family; the XOR-bit counts are derived from the
 * device's pipe/SE/bank configuration so the modifiers describe this
 * exact chip's layout.
 */
static void
add_gfx9_modifiers(const struct amdgpu_device *adev,
		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	/* XOR bit fields in the modifier are capped at 8 bits total. */
	int pipe_xor_bits = min(8, pipes +
				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
	int bank_xor_bits = min(8 - pipe_xor_bits,
				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);


	if (adev->family == AMDGPU_FAMILY_RV) {
		/* Raven2 and later */
		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

		/*
		 * No _D DCC swizzles yet because we only allow 32bpp, which
		 * doesn't support _D on DCN
		 */

		/* _S_X + DCC with constant-encode (Raven2+ only). */
		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
		}

		/* Same DCC layout without constant-encode. */
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

		/* Retiled DCC variants additionally encode RB and PIPE counts. */
		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |

				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				    AMD_FMT_MOD_SET(RB, rb) |
				    AMD_FMT_MOD_SET(PIPE, pipes));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
			    AMD_FMT_MOD_SET(RB, rb) |
			    AMD_FMT_MOD_SET(PIPE, pipes));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

	/* Plain (non-DCC) _S_X, RV only. */
	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	/* Non-XOR _S fallback, RV only. */
	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
	}
}
4173
/*
 * Advertise the GFX10.1 (pre-RB+) swizzle modes as format modifiers,
 * listed roughly best-first: _R_X with DCC, then plain _R_X/_S_X, then
 * the non-XOR GFX9 fallbacks.
 */
static void
add_gfx10_1_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

	/* _R_X + DCC (64B independent blocks). */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	/* _R_X + retiled DCC. */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	/* Plain _R_X and _S_X without DCC. */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));


	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	/* Non-XOR _S fallback. */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
4219
/*
 * Advertise the GFX10.3 (RB+, Sienna Cichlid and later) swizzle modes as
 * format modifiers.  Compared to GFX10.1 these additionally encode the
 * packer count and use 128B max compressed blocks with both 64B and 128B
 * independent-block DCC.
 */
static void
add_gfx10_3_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

	/* _R_X + DCC. */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	/* _R_X + retiled DCC. */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	/* Plain _R_X and _S_X without DCC. */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	/* Non-XOR _S fallback. */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
4271
4272 static int
4273 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4274 {
4275         uint64_t size = 0, capacity = 128;
4276         *mods = NULL;
4277
4278         /* We have not hooked up any pre-GFX9 modifiers. */
4279         if (adev->family < AMDGPU_FAMILY_AI)
4280                 return 0;
4281
4282         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4283
4284         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4285                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4286                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4287                 return *mods ? 0 : -ENOMEM;
4288         }
4289
4290         switch (adev->family) {
4291         case AMDGPU_FAMILY_AI:
4292         case AMDGPU_FAMILY_RV:
4293                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4294                 break;
4295         case AMDGPU_FAMILY_NV:
4296         case AMDGPU_FAMILY_VGH:
4297                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4298                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4299                 else
4300                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4301                 break;
4302         }
4303
4304         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4305
4306         /* INVALID marks the end of the list. */
4307         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4308
4309         if (!*mods)
4310                 return -ENOMEM;
4311
4312         return 0;
4313 }
4314
4315 static int
4316 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4317                                           const struct amdgpu_framebuffer *afb,
4318                                           const enum surface_pixel_format format,
4319                                           const enum dc_rotation_angle rotation,
4320                                           const struct plane_size *plane_size,
4321                                           union dc_tiling_info *tiling_info,
4322                                           struct dc_plane_dcc_param *dcc,
4323                                           struct dc_plane_address *address,
4324                                           const bool force_disable_dcc)
4325 {
4326         const uint64_t modifier = afb->base.modifier;
4327         int ret;
4328
4329         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4330         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4331
4332         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4333                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4334
4335                 dcc->enable = 1;
4336                 dcc->meta_pitch = afb->base.pitches[1];
4337                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4338
4339                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4340                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4341         }
4342
4343         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4344         if (ret)
4345                 return ret;
4346
4347         return 0;
4348 }
4349
/*
 * Fill the DC tiling, size, DCC and address state for a framebuffer.
 *
 * RGB formats program a single GRAPHICS surface from FB plane 0; video
 * formats program separate luma (plane 0) and chroma (plane 1) surfaces.
 * GFX9+ then derives tiling/DCC from the format modifier, older families
 * decode the legacy tiling flags.
 *
 * Returns 0 on success or a negative errno from the modifier/DCC path.
 */
static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	/* Start from a clean slate; only relevant fields are set below. */
	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		/* RGB: single graphics surface at FB plane 0. */
		uint64_t addr = afb->address + fb->offsets[0];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		/* DC pitch is in pixels; DRM pitch is in bytes. */
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(addr);
		address->grph.addr.high_part = upper_32_bits(addr);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		/* Video: luma at FB plane 0, chroma at FB plane 1. */
		uint64_t luma_addr = afb->address + fb->offsets[0];
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(luma_addr);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(luma_addr);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	if (adev->family >= AMDGPU_FAMILY_AI) {
		/* GFX9+: tiling and DCC come from the format modifier. */
		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
								rotation, plane_size,
								tiling_info, dcc,
								address,
								force_disable_dcc);
		if (ret)
			return ret;
	} else {
		/* Pre-GFX9: decode the legacy per-BO tiling flags. */
		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
	}

	return 0;
}
4431
4432 static void
4433 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4434                                bool *per_pixel_alpha, bool *global_alpha,
4435                                int *global_alpha_value)
4436 {
4437         *per_pixel_alpha = false;
4438         *global_alpha = false;
4439         *global_alpha_value = 0xff;
4440
4441         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4442                 return;
4443
4444         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4445                 static const uint32_t alpha_formats[] = {
4446                         DRM_FORMAT_ARGB8888,
4447                         DRM_FORMAT_RGBA8888,
4448                         DRM_FORMAT_ABGR8888,
4449                 };
4450                 uint32_t format = plane_state->fb->format->format;
4451                 unsigned int i;
4452
4453                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4454                         if (format == alpha_formats[i]) {
4455                                 *per_pixel_alpha = true;
4456                                 break;
4457                         }
4458                 }
4459         }
4460
4461         if (plane_state->alpha < 0xffff) {
4462                 *global_alpha = true;
4463                 *global_alpha_value = plane_state->alpha >> 8;
4464         }
4465 }
4466
4467 static int
4468 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4469                             const enum surface_pixel_format format,
4470                             enum dc_color_space *color_space)
4471 {
4472         bool full_range;
4473
4474         *color_space = COLOR_SPACE_SRGB;
4475
4476         /* DRM color properties only affect non-RGB formats. */
4477         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4478                 return 0;
4479
4480         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4481
4482         switch (plane_state->color_encoding) {
4483         case DRM_COLOR_YCBCR_BT601:
4484                 if (full_range)
4485                         *color_space = COLOR_SPACE_YCBCR601;
4486                 else
4487                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4488                 break;
4489
4490         case DRM_COLOR_YCBCR_BT709:
4491                 if (full_range)
4492                         *color_space = COLOR_SPACE_YCBCR709;
4493                 else
4494                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4495                 break;
4496
4497         case DRM_COLOR_YCBCR_BT2020:
4498                 if (full_range)
4499                         *color_space = COLOR_SPACE_2020_YCBCR;
4500                 else
4501                         return -EINVAL;
4502                 break;
4503
4504         default:
4505                 return -EINVAL;
4506         }
4507
4508         return 0;
4509 }
4510
/*
 * Translate a DRM plane state into a DC plane_info plus surface address:
 * maps the fourcc format to a DC surface format, converts rotation,
 * fills color-space, buffer/tiling/DCC and blending attributes.
 *
 * Returns 0 on success, -EINVAL for unsupported formats, or a negative
 * errno propagated from the color/buffer attribute helpers.
 */
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct drm_format_name_buf format_name;
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	/* DRM fourcc -> DC surface pixel format. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %s\n",
			drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	/* DRM rotation property -> DC rotation angle (default: 0). */
	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}
4619
4620 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4621                                     struct dc_plane_state *dc_plane_state,
4622                                     struct drm_plane_state *plane_state,
4623                                     struct drm_crtc_state *crtc_state)
4624 {
4625         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4626         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4627         struct dc_scaling_info scaling_info;
4628         struct dc_plane_info plane_info;
4629         int ret;
4630         bool force_disable_dcc = false;
4631
4632         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4633         if (ret)
4634                 return ret;
4635
4636         dc_plane_state->src_rect = scaling_info.src_rect;
4637         dc_plane_state->dst_rect = scaling_info.dst_rect;
4638         dc_plane_state->clip_rect = scaling_info.clip_rect;
4639         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4640
4641         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4642         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4643                                           afb->tiling_flags,
4644                                           &plane_info,
4645                                           &dc_plane_state->address,
4646                                           afb->tmz_surface,
4647                                           force_disable_dcc);
4648         if (ret)
4649                 return ret;
4650
4651         dc_plane_state->format = plane_info.format;
4652         dc_plane_state->color_space = plane_info.color_space;
4653         dc_plane_state->format = plane_info.format;
4654         dc_plane_state->plane_size = plane_info.plane_size;
4655         dc_plane_state->rotation = plane_info.rotation;
4656         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4657         dc_plane_state->stereo_format = plane_info.stereo_format;
4658         dc_plane_state->tiling_info = plane_info.tiling_info;
4659         dc_plane_state->visible = plane_info.visible;
4660         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4661         dc_plane_state->global_alpha = plane_info.global_alpha;
4662         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4663         dc_plane_state->dcc = plane_info.dcc;
4664         dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4665
4666         /*
4667          * Always set input transfer function, since plane state is refreshed
4668          * every time.
4669          */
4670         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4671         if (ret)
4672                 return ret;
4673
4674         return 0;
4675 }
4676
4677 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4678                                            const struct dm_connector_state *dm_state,
4679                                            struct dc_stream_state *stream)
4680 {
4681         enum amdgpu_rmx_type rmx_type;
4682
4683         struct rect src = { 0 }; /* viewport in composition space*/
4684         struct rect dst = { 0 }; /* stream addressable area */
4685
4686         /* no mode. nothing to be done */
4687         if (!mode)
4688                 return;
4689
4690         /* Full screen scaling by default */
4691         src.width = mode->hdisplay;
4692         src.height = mode->vdisplay;
4693         dst.width = stream->timing.h_addressable;
4694         dst.height = stream->timing.v_addressable;
4695
4696         if (dm_state) {
4697                 rmx_type = dm_state->scaling;
4698                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4699                         if (src.width * dst.height <
4700                                         src.height * dst.width) {
4701                                 /* height needs less upscaling/more downscaling */
4702                                 dst.width = src.width *
4703                                                 dst.height / src.height;
4704                         } else {
4705                                 /* width needs less upscaling/more downscaling */
4706                                 dst.height = src.height *
4707                                                 dst.width / src.width;
4708                         }
4709                 } else if (rmx_type == RMX_CENTER) {
4710                         dst = src;
4711                 }
4712
4713                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4714                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4715
4716                 if (dm_state->underscan_enable) {
4717                         dst.x += dm_state->underscan_hborder / 2;
4718                         dst.y += dm_state->underscan_vborder / 2;
4719                         dst.width -= dm_state->underscan_hborder;
4720                         dst.height -= dm_state->underscan_vborder;
4721                 }
4722         }
4723
4724         stream->src = src;
4725         stream->dst = dst;
4726
4727         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4728                         dst.x, dst.y, dst.width, dst.height);
4729
4730 }
4731
4732 static enum dc_color_depth
4733 convert_color_depth_from_display_info(const struct drm_connector *connector,
4734                                       bool is_y420, int requested_bpc)
4735 {
4736         uint8_t bpc;
4737
4738         if (is_y420) {
4739                 bpc = 8;
4740
4741                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4742                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4743                         bpc = 16;
4744                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4745                         bpc = 12;
4746                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4747                         bpc = 10;
4748         } else {
4749                 bpc = (uint8_t)connector->display_info.bpc;
4750                 /* Assume 8 bpc by default if no bpc is specified. */
4751                 bpc = bpc ? bpc : 8;
4752         }
4753
4754         if (requested_bpc > 0) {
4755                 /*
4756                  * Cap display bpc based on the user requested value.
4757                  *
4758                  * The value for state->max_bpc may not correctly updated
4759                  * depending on when the connector gets added to the state
4760                  * or if this was called outside of atomic check, so it
4761                  * can't be used directly.
4762                  */
4763                 bpc = min_t(u8, bpc, requested_bpc);
4764
4765                 /* Round down to the nearest even number. */
4766                 bpc = bpc - (bpc & 1);
4767         }
4768
4769         switch (bpc) {
4770         case 0:
4771                 /*
4772                  * Temporary Work around, DRM doesn't parse color depth for
4773                  * EDID revision before 1.4
4774                  * TODO: Fix edid parsing
4775                  */
4776                 return COLOR_DEPTH_888;
4777         case 6:
4778                 return COLOR_DEPTH_666;
4779         case 8:
4780                 return COLOR_DEPTH_888;
4781         case 10:
4782                 return COLOR_DEPTH_101010;
4783         case 12:
4784                 return COLOR_DEPTH_121212;
4785         case 14:
4786                 return COLOR_DEPTH_141414;
4787         case 16:
4788                 return COLOR_DEPTH_161616;
4789         default:
4790                 return COLOR_DEPTH_UNDEFINED;
4791         }
4792 }
4793
4794 static enum dc_aspect_ratio
4795 get_aspect_ratio(const struct drm_display_mode *mode_in)
4796 {
4797         /* 1-1 mapping, since both enums follow the HDMI spec. */
4798         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4799 }
4800
4801 static enum dc_color_space
4802 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4803 {
4804         enum dc_color_space color_space = COLOR_SPACE_SRGB;
4805
4806         switch (dc_crtc_timing->pixel_encoding) {
4807         case PIXEL_ENCODING_YCBCR422:
4808         case PIXEL_ENCODING_YCBCR444:
4809         case PIXEL_ENCODING_YCBCR420:
4810         {
4811                 /*
4812                  * 27030khz is the separation point between HDTV and SDTV
4813                  * according to HDMI spec, we use YCbCr709 and YCbCr601
4814                  * respectively
4815                  */
4816                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4817                         if (dc_crtc_timing->flags.Y_ONLY)
4818                                 color_space =
4819                                         COLOR_SPACE_YCBCR709_LIMITED;
4820                         else
4821                                 color_space = COLOR_SPACE_YCBCR709;
4822                 } else {
4823                         if (dc_crtc_timing->flags.Y_ONLY)
4824                                 color_space =
4825                                         COLOR_SPACE_YCBCR601_LIMITED;
4826                         else
4827                                 color_space = COLOR_SPACE_YCBCR601;
4828                 }
4829
4830         }
4831         break;
4832         case PIXEL_ENCODING_RGB:
4833                 color_space = COLOR_SPACE_SRGB;
4834                 break;
4835
4836         default:
4837                 WARN_ON(1);
4838                 break;
4839         }
4840
4841         return color_space;
4842 }
4843
/*
 * Walk the colour depth downward from the current setting until the
 * resulting (depth-scaled) pixel clock fits within the sink's maximum TMDS
 * clock.  On success, writes the chosen depth into
 * timing_out->display_color_depth and returns true; returns false when no
 * HDMI-valid depth fits (or when a non-HDMI depth is encountered).
 */
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;
	do {
		/* pix_clk_100hz / 10 yields kHz, matching max_tmds_clock units. */
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjusting pix clock following on HDMI spec based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
		/* Try the next lower enum value; stop above COLOR_DEPTH_666. */
	} while (--depth > COLOR_DEPTH_666);
	return false;
}
4879
/*
 * Translate a DRM display mode plus connector capabilities into the DC
 * stream's CRTC timing: pixel encoding, colour depth, VIC codes, sync
 * polarities, raw h/v timing numbers and the output color space.
 *
 * When @old_stream is non-NULL its VIC and sync polarities are reused
 * instead of being re-derived (see the caller's scaling-only path).
 */
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	/* Pick the pixel encoding: 4:2:0 when the mode requires it on HDMI
	 * (or the connector forces it), 4:4:4 when the HDMI sink advertises
	 * it, RGB otherwise. */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	/* Reuse the previous stream's VIC and polarities when provided. */
	if(old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	/* For HDMI, take the VIC/HDMI-VIC from the DRM infoframe helpers. */
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	/* Copy the crtc_* timings; * 10 converts crtc_clock (kHz) to 100 Hz units. */
	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	/* If the chosen depth does not fit the sink's TMDS clock, retry as
	 * 4:2:0 (which halves the normalized clock) when the mode allows. */
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}
4969
4970 static void fill_audio_info(struct audio_info *audio_info,
4971                             const struct drm_connector *drm_connector,
4972                             const struct dc_sink *dc_sink)
4973 {
4974         int i = 0;
4975         int cea_revision = 0;
4976         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4977
4978         audio_info->manufacture_id = edid_caps->manufacturer_id;
4979         audio_info->product_id = edid_caps->product_id;
4980
4981         cea_revision = drm_connector->display_info.cea_rev;
4982
4983         strscpy(audio_info->display_name,
4984                 edid_caps->display_name,
4985                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4986
4987         if (cea_revision >= 3) {
4988                 audio_info->mode_count = edid_caps->audio_mode_count;
4989
4990                 for (i = 0; i < audio_info->mode_count; ++i) {
4991                         audio_info->modes[i].format_code =
4992                                         (enum audio_format_code)
4993                                         (edid_caps->audio_modes[i].format_code);
4994                         audio_info->modes[i].channel_count =
4995                                         edid_caps->audio_modes[i].channel_count;
4996                         audio_info->modes[i].sample_rates.all =
4997                                         edid_caps->audio_modes[i].sample_rate;
4998                         audio_info->modes[i].sample_size =
4999                                         edid_caps->audio_modes[i].sample_size;
5000                 }
5001         }
5002
5003         audio_info->flags.all = edid_caps->speaker_flags;
5004
5005         /* TODO: We only check for the progressive mode, check for interlace mode too */
5006         if (drm_connector->latency_present[0]) {
5007                 audio_info->video_latency = drm_connector->video_latency[0];
5008                 audio_info->audio_latency = drm_connector->audio_latency[0];
5009         }
5010
5011         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5012
5013 }
5014
5015 static void
5016 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5017                                       struct drm_display_mode *dst_mode)
5018 {
5019         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5020         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5021         dst_mode->crtc_clock = src_mode->crtc_clock;
5022         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5023         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5024         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5025         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5026         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5027         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5028         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5029         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5030         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5031         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5032         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5033 }
5034
5035 static void
5036 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5037                                         const struct drm_display_mode *native_mode,
5038                                         bool scale_enabled)
5039 {
5040         if (scale_enabled) {
5041                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5042         } else if (native_mode->clock == drm_mode->clock &&
5043                         native_mode->htotal == drm_mode->htotal &&
5044                         native_mode->vtotal == drm_mode->vtotal) {
5045                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5046         } else {
5047                 /* no scaling nor amdgpu inserted, no need to patch */
5048         }
5049 }
5050
5051 static struct dc_sink *
5052 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5053 {
5054         struct dc_sink_init_data sink_init_data = { 0 };
5055         struct dc_sink *sink = NULL;
5056         sink_init_data.link = aconnector->dc_link;
5057         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5058
5059         sink = dc_sink_create(&sink_init_data);
5060         if (!sink) {
5061                 DRM_ERROR("Failed to create sink!\n");
5062                 return NULL;
5063         }
5064         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5065
5066         return sink;
5067 }
5068
5069 static void set_multisync_trigger_params(
5070                 struct dc_stream_state *stream)
5071 {
5072         if (stream->triggered_crtc_reset.enabled) {
5073                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5074                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5075         }
5076 }
5077
5078 static void set_master_stream(struct dc_stream_state *stream_set[],
5079                               int stream_count)
5080 {
5081         int j, highest_rfr = 0, master_stream = 0;
5082
5083         for (j = 0;  j < stream_count; j++) {
5084                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5085                         int refresh_rate = 0;
5086
5087                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5088                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5089                         if (refresh_rate > highest_rfr) {
5090                                 highest_rfr = refresh_rate;
5091                                 master_stream = j;
5092                         }
5093                 }
5094         }
5095         for (j = 0;  j < stream_count; j++) {
5096                 if (stream_set[j])
5097                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5098         }
5099 }
5100
5101 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5102 {
5103         int i = 0;
5104
5105         if (context->stream_count < 2)
5106                 return;
5107         for (i = 0; i < context->stream_count ; i++) {
5108                 if (!context->streams[i])
5109                         continue;
5110                 /*
5111                  * TODO: add a function to read AMD VSDB bits and set
5112                  * crtc_sync_master.multi_sync_enabled flag
5113                  * For now it's set to false
5114                  */
5115                 set_multisync_trigger_params(context->streams[i]);
5116         }
5117         set_master_stream(context->streams, context->stream_count);
5118 }
5119
/*
 * Build a dc_stream_state for @aconnector driving @drm_mode.
 *
 * @dm_state may be NULL; scaling/underscan and old-timing reuse are then
 * skipped.  @old_stream, when only scaling changed, donates its VIC and
 * sync polarities.  @requested_bpc caps the colour depth (0 = no cap).
 * The returned stream holds its own reference; NULL on failure.
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
	uint32_t link_bandwidth_kbps;
#endif
	struct dc_sink *sink = NULL;
	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	/* With no real sink attached, attach a virtual one so stream
	 * creation can still proceed. */
	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	/* Find the connector's preferred mode (fall back to the first one). */
	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	* If scaling is enabled and refresh rate didn't change
	* we copy the vic and polarities of the old timings
	*/
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL, requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream, requested_bpc);

	stream->timing.flags.DSC = 0;

	/* DSC configuration for DisplayPort sinks (DCN-only code path). */
	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
				      &dsc_caps);
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
			/* Set DSC policy according to dsc_clock_en */
			dc_dsc_policy_set_enable_dsc_when_not_needed(
				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  0,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
			/* Overwrite the stream flag if DSC is enabled through debugfs */
			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
				stream->timing.flags.DSC = 1;

			/* debugfs overrides for slice counts and bpp. */
			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
		}
#endif
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);

	if (stream->link->psr_settings.psr_feature_enabled) {
		//
		// should decide stream support vsc sdp colorimetry capability
		// before building vsc info packet
		//
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
	}
finish:
	/* Drop the reference taken above (or the fake sink's initial ref). */
	dc_sink_release(sink);

	return stream;
}
5286
/* Release DRM bookkeeping for the CRTC, then free its allocation. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
5292
5293 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5294                                   struct drm_crtc_state *state)
5295 {
5296         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5297
5298         /* TODO Destroy dc_stream objects are stream object is flattened */
5299         if (cur->stream)
5300                 dc_stream_release(cur->stream);
5301
5302
5303         __drm_atomic_helper_crtc_destroy_state(state);
5304
5305
5306         kfree(state);
5307 }
5308
/* Reset the CRTC to a fresh zeroed dm_crtc_state (drm_crtc_funcs.reset). */
static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	/* Tear down any previous state before installing a fresh one. */
	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	__drm_atomic_helper_crtc_reset(crtc, &state->base);
}
5322
5323 static struct drm_crtc_state *
5324 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5325 {
5326         struct dm_crtc_state *state, *cur;
5327
5328         cur = to_dm_crtc_state(crtc->state);
5329
5330         if (WARN_ON(!crtc->state))
5331                 return NULL;
5332
5333         state = kzalloc(sizeof(*state), GFP_KERNEL);
5334         if (!state)
5335                 return NULL;
5336
5337         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5338
5339         if (cur->stream) {
5340                 state->stream = cur->stream;
5341                 dc_stream_retain(state->stream);
5342         }
5343
5344         state->active_planes = cur->active_planes;
5345         state->vrr_infopacket = cur->vrr_infopacket;
5346         state->abm_level = cur->abm_level;
5347         state->vrr_supported = cur->vrr_supported;
5348         state->freesync_config = cur->freesync_config;
5349         state->crc_src = cur->crc_src;
5350         state->cm_has_degamma = cur->cm_has_degamma;
5351         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5352
5353         /* TODO Duplicate dc_stream after objects are stream object is flattened */
5354
5355         return &state->base;
5356 }
5357
5358 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5359 {
5360         enum dc_irq_source irq_source;
5361         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5362         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5363         int rc;
5364
5365         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5366
5367         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5368
5369         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5370                          acrtc->crtc_id, enable ? "en" : "dis", rc);
5371         return rc;
5372 }
5373
5374 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5375 {
5376         enum dc_irq_source irq_source;
5377         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5378         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5379         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5380         struct amdgpu_display_manager *dm = &adev->dm;
5381         int rc = 0;
5382
5383         if (enable) {
5384                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5385                 if (amdgpu_dm_vrr_active(acrtc_state))
5386                         rc = dm_set_vupdate_irq(crtc, true);
5387         } else {
5388                 /* vblank irq off -> vupdate irq off */
5389                 rc = dm_set_vupdate_irq(crtc, false);
5390         }
5391
5392         if (rc)
5393                 return rc;
5394
5395         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5396
5397         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5398                 return -EBUSY;
5399
5400 #if defined(CONFIG_DRM_AMD_DC_DCN)
5401         if (amdgpu_in_reset(adev))
5402                 return 0;
5403
5404         mutex_lock(&dm->dc_lock);
5405
5406         if (enable)
5407                 dm->active_vblank_irq_count++;
5408         else
5409                 dm->active_vblank_irq_count--;
5410
5411 #if defined(CONFIG_DRM_AMD_DC_DCN)
5412         dc_allow_idle_optimizations(
5413                 adev->dm.dc, dm->active_vblank_irq_count == 0 ? true : false);
5414
5415         DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
5416 #endif
5417
5418         mutex_unlock(&dm->dc_lock);
5419
5420 #endif
5421         return 0;
5422 }
5423
/* drm_crtc_funcs.enable_vblank: thin wrapper around dm_set_vblank(). */
static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}
5428
/* drm_crtc_funcs.disable_vblank: return value intentionally ignored here. */
static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
5433
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
5450
5451 static enum drm_connector_status
5452 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5453 {
5454         bool connected;
5455         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5456
5457         /*
5458          * Notes:
5459          * 1. This interface is NOT called in context of HPD irq.
5460          * 2. This interface *is called* in context of user-mode ioctl. Which
5461          * makes it a bad place for *any* MST-related activity.
5462          */
5463
5464         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5465             !aconnector->fake_enable)
5466                 connected = (aconnector->dc_sink != NULL);
5467         else
5468                 connected = (aconnector->base.force == DRM_FORCE_ON);
5469
5470         update_subconnector_property(aconnector);
5471
5472         return (connected ? connector_status_connected :
5473                         connector_status_disconnected);
5474 }
5475
5476 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5477                                             struct drm_connector_state *connector_state,
5478                                             struct drm_property *property,
5479                                             uint64_t val)
5480 {
5481         struct drm_device *dev = connector->dev;
5482         struct amdgpu_device *adev = drm_to_adev(dev);
5483         struct dm_connector_state *dm_old_state =
5484                 to_dm_connector_state(connector->state);
5485         struct dm_connector_state *dm_new_state =
5486                 to_dm_connector_state(connector_state);
5487
5488         int ret = -EINVAL;
5489
5490         if (property == dev->mode_config.scaling_mode_property) {
5491                 enum amdgpu_rmx_type rmx_type;
5492
5493                 switch (val) {
5494                 case DRM_MODE_SCALE_CENTER:
5495                         rmx_type = RMX_CENTER;
5496                         break;
5497                 case DRM_MODE_SCALE_ASPECT:
5498                         rmx_type = RMX_ASPECT;
5499                         break;
5500                 case DRM_MODE_SCALE_FULLSCREEN:
5501                         rmx_type = RMX_FULL;
5502                         break;
5503                 case DRM_MODE_SCALE_NONE:
5504                 default:
5505                         rmx_type = RMX_OFF;
5506                         break;
5507                 }
5508
5509                 if (dm_old_state->scaling == rmx_type)
5510                         return 0;
5511
5512                 dm_new_state->scaling = rmx_type;
5513                 ret = 0;
5514         } else if (property == adev->mode_info.underscan_hborder_property) {
5515                 dm_new_state->underscan_hborder = val;
5516                 ret = 0;
5517         } else if (property == adev->mode_info.underscan_vborder_property) {
5518                 dm_new_state->underscan_vborder = val;
5519                 ret = 0;
5520         } else if (property == adev->mode_info.underscan_property) {
5521                 dm_new_state->underscan_enable = val;
5522                 ret = 0;
5523         } else if (property == adev->mode_info.abm_level_property) {
5524                 dm_new_state->abm_level = val;
5525                 ret = 0;
5526         }
5527
5528         return ret;
5529 }
5530
5531 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5532                                             const struct drm_connector_state *state,
5533                                             struct drm_property *property,
5534                                             uint64_t *val)
5535 {
5536         struct drm_device *dev = connector->dev;
5537         struct amdgpu_device *adev = drm_to_adev(dev);
5538         struct dm_connector_state *dm_state =
5539                 to_dm_connector_state(state);
5540         int ret = -EINVAL;
5541
5542         if (property == dev->mode_config.scaling_mode_property) {
5543                 switch (dm_state->scaling) {
5544                 case RMX_CENTER:
5545                         *val = DRM_MODE_SCALE_CENTER;
5546                         break;
5547                 case RMX_ASPECT:
5548                         *val = DRM_MODE_SCALE_ASPECT;
5549                         break;
5550                 case RMX_FULL:
5551                         *val = DRM_MODE_SCALE_FULLSCREEN;
5552                         break;
5553                 case RMX_OFF:
5554                 default:
5555                         *val = DRM_MODE_SCALE_NONE;
5556                         break;
5557                 }
5558                 ret = 0;
5559         } else if (property == adev->mode_info.underscan_hborder_property) {
5560                 *val = dm_state->underscan_hborder;
5561                 ret = 0;
5562         } else if (property == adev->mode_info.underscan_vborder_property) {
5563                 *val = dm_state->underscan_vborder;
5564                 ret = 0;
5565         } else if (property == adev->mode_info.underscan_property) {
5566                 *val = dm_state->underscan_enable;
5567                 ret = 0;
5568         } else if (property == adev->mode_info.abm_level_property) {
5569                 *val = dm_state->abm_level;
5570                 ret = 0;
5571         }
5572
5573         return ret;
5574 }
5575
/* drm_connector_funcs.early_unregister: drop the DP AUX channel from sysfs
 * before the connector itself disappears.
 */
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
5582
/*
 * drm_connector_funcs.destroy: tear down everything the DM attached to a
 * connector — MST topology manager, backlight device (eDP/LVDS), emulated
 * and real dc_sinks, CEC, the DDC i2c adapter — then free the connector.
 */
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct amdgpu_display_manager *dm = &adev->dm;

	/*
	 * Call only if mst_mgr was initialized before since it's not done
	 * for all connector types.
	 */
	if (aconnector->mst_mgr.dev)
		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	/* Only eDP/LVDS panels with a live link own the backlight device. */
	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	/* Drop both the emulated and the detected sink references. */
	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}
5626
5627 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5628 {
5629         struct dm_connector_state *state =
5630                 to_dm_connector_state(connector->state);
5631
5632         if (connector->state)
5633                 __drm_atomic_helper_connector_destroy_state(connector->state);
5634
5635         kfree(state);
5636
5637         state = kzalloc(sizeof(*state), GFP_KERNEL);
5638
5639         if (state) {
5640                 state->scaling = RMX_OFF;
5641                 state->underscan_enable = false;
5642                 state->underscan_hborder = 0;
5643                 state->underscan_vborder = 0;
5644                 state->base.max_requested_bpc = 8;
5645                 state->vcpi_slots = 0;
5646                 state->pbn = 0;
5647                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5648                         state->abm_level = amdgpu_dm_abm_level;
5649
5650                 __drm_atomic_helper_connector_reset(connector, &state->base);
5651         }
5652 }
5653
5654 struct drm_connector_state *
5655 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5656 {
5657         struct dm_connector_state *state =
5658                 to_dm_connector_state(connector->state);
5659
5660         struct dm_connector_state *new_state =
5661                         kmemdup(state, sizeof(*state), GFP_KERNEL);
5662
5663         if (!new_state)
5664                 return NULL;
5665
5666         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5667
5668         new_state->freesync_capable = state->freesync_capable;
5669         new_state->abm_level = state->abm_level;
5670         new_state->scaling = state->scaling;
5671         new_state->underscan_enable = state->underscan_enable;
5672         new_state->underscan_hborder = state->underscan_hborder;
5673         new_state->underscan_vborder = state->underscan_vborder;
5674         new_state->vcpi_slots = state->vcpi_slots;
5675         new_state->pbn = state->pbn;
5676         return &new_state->base;
5677 }
5678
5679 static int
5680 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5681 {
5682         struct amdgpu_dm_connector *amdgpu_dm_connector =
5683                 to_amdgpu_dm_connector(connector);
5684         int r;
5685
5686         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5687             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5688                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5689                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5690                 if (r)
5691                         return r;
5692         }
5693
5694 #if defined(CONFIG_DEBUG_FS)
5695         connector_debugfs_init(amdgpu_dm_connector);
5696 #endif
5697
5698         return 0;
5699 }
5700
/* Connector vfuncs shared by all DM-created connectors. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};
5713
/* drm_connector_helper_funcs.get_modes: thin wrapper, see comment on the
 * helper funcs table below for why modes are rebuilt on every call.
 */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
5718
5719 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5720 {
5721         struct dc_sink_init_data init_params = {
5722                         .link = aconnector->dc_link,
5723                         .sink_signal = SIGNAL_TYPE_VIRTUAL
5724         };
5725         struct edid *edid;
5726
5727         if (!aconnector->base.edid_blob_ptr) {
5728                 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5729                                 aconnector->base.name);
5730
5731                 aconnector->base.force = DRM_FORCE_OFF;
5732                 aconnector->base.override_edid = false;
5733                 return;
5734         }
5735
5736         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5737
5738         aconnector->edid = edid;
5739
5740         aconnector->dc_em_sink = dc_link_add_remote_sink(
5741                 aconnector->dc_link,
5742                 (uint8_t *)edid,
5743                 (edid->extensions + 1) * EDID_LENGTH,
5744                 &init_params);
5745
5746         if (aconnector->base.force == DRM_FORCE_ON) {
5747                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5748                 aconnector->dc_link->local_sink :
5749                 aconnector->dc_em_sink;
5750                 dc_sink_retain(aconnector->dc_sink);
5751         }
5752 }
5753
5754 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5755 {
5756         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5757
5758         /*
5759          * In case of headless boot with force on for DP managed connector
5760          * Those settings have to be != 0 to get initial modeset
5761          */
5762         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5763                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5764                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5765         }
5766
5767
5768         aconnector->base.override_edid = true;
5769         create_eml_sink(aconnector);
5770 }
5771
5772 static struct dc_stream_state *
5773 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5774                                 const struct drm_display_mode *drm_mode,
5775                                 const struct dm_connector_state *dm_state,
5776                                 const struct dc_stream_state *old_stream)
5777 {
5778         struct drm_connector *connector = &aconnector->base;
5779         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5780         struct dc_stream_state *stream;
5781         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5782         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5783         enum dc_status dc_result = DC_OK;
5784
5785         do {
5786                 stream = create_stream_for_sink(aconnector, drm_mode,
5787                                                 dm_state, old_stream,
5788                                                 requested_bpc);
5789                 if (stream == NULL) {
5790                         DRM_ERROR("Failed to create stream for sink!\n");
5791                         break;
5792                 }
5793
5794                 dc_result = dc_validate_stream(adev->dm.dc, stream);
5795
5796                 if (dc_result != DC_OK) {
5797                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5798                                       drm_mode->hdisplay,
5799                                       drm_mode->vdisplay,
5800                                       drm_mode->clock,
5801                                       dc_result,
5802                                       dc_status_to_str(dc_result));
5803
5804                         dc_stream_release(stream);
5805                         stream = NULL;
5806                         requested_bpc -= 2; /* lower bpc to retry validation */
5807                 }
5808
5809         } while (stream == NULL && requested_bpc >= 6);
5810
5811         return stream;
5812 }
5813
5814 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5815                                    struct drm_display_mode *mode)
5816 {
5817         int result = MODE_ERROR;
5818         struct dc_sink *dc_sink;
5819         /* TODO: Unhardcode stream count */
5820         struct dc_stream_state *stream;
5821         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5822
5823         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5824                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5825                 return result;
5826
5827         /*
5828          * Only run this the first time mode_valid is called to initilialize
5829          * EDID mgmt
5830          */
5831         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5832                 !aconnector->dc_em_sink)
5833                 handle_edid_mgmt(aconnector);
5834
5835         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5836
5837         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5838                                 aconnector->base.force != DRM_FORCE_ON) {
5839                 DRM_ERROR("dc_sink is NULL!\n");
5840                 goto fail;
5841         }
5842
5843         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5844         if (stream) {
5845                 dc_stream_release(stream);
5846                 result = MODE_OK;
5847         }
5848
5849 fail:
5850         /* TODO: error handling*/
5851         return result;
5852 }
5853
/*
 * Build a DC info packet carrying the HDR static metadata from the connector
 * state: an HDMI Dynamic Range and Mastering (DRM) infoframe for HDMI, or
 * the equivalent SDP for DP/eDP.
 *
 * Returns 0 on success (out->valid is set only when metadata was present),
 * a negative errno if packing fails or the connector type is unsupported.
 */
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	/* No HDR metadata on this connector: leave the packet invalid. */
	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	/* Copy the 26 metadata payload bytes after the packed header. */
	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}
5912
5913 static bool
5914 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5915                           const struct drm_connector_state *new_state)
5916 {
5917         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5918         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5919
5920         if (old_blob != new_blob) {
5921                 if (old_blob && new_blob &&
5922                     old_blob->length == new_blob->length)
5923                         return memcmp(old_blob->data, new_blob->data,
5924                                       old_blob->length);
5925
5926                 return true;
5927         }
5928
5929         return false;
5930 }
5931
5932 static int
5933 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5934                                  struct drm_atomic_state *state)
5935 {
5936         struct drm_connector_state *new_con_state =
5937                 drm_atomic_get_new_connector_state(state, conn);
5938         struct drm_connector_state *old_con_state =
5939                 drm_atomic_get_old_connector_state(state, conn);
5940         struct drm_crtc *crtc = new_con_state->crtc;
5941         struct drm_crtc_state *new_crtc_state;
5942         int ret;
5943
5944         trace_amdgpu_dm_connector_atomic_check(new_con_state);
5945
5946         if (!crtc)
5947                 return 0;
5948
5949         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5950                 struct dc_info_packet hdr_infopacket;
5951
5952                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5953                 if (ret)
5954                         return ret;
5955
5956                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5957                 if (IS_ERR(new_crtc_state))
5958                         return PTR_ERR(new_crtc_state);
5959
5960                 /*
5961                  * DC considers the stream backends changed if the
5962                  * static metadata changes. Forcing the modeset also
5963                  * gives a simple way for userspace to switch from
5964                  * 8bpc to 10bpc when setting the metadata to enter
5965                  * or exit HDR.
5966                  *
5967                  * Changing the static metadata after it's been
5968                  * set is permissible, however. So only force a
5969                  * modeset if we're entering or exiting HDR.
5970                  */
5971                 new_crtc_state->mode_changed =
5972                         !old_con_state->hdr_output_metadata ||
5973                         !new_con_state->hdr_output_metadata;
5974         }
5975
5976         return 0;
5977 }
5978
/* Probe/validation helper vfuncs shared by all DM connectors. */
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
	 * modes will be filtered by drm_mode_validate_size(), and those modes
	 * are missing after user start lightdm. So we need to renew modes list.
	 * in get_modes call back, not just return the modes count
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};
5991
/* Intentionally empty: CRTC disable is handled through the atomic commit
 * path, but the helper vtable requires a .disable hook.
 */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
5995
5996 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5997 {
5998         struct drm_atomic_state *state = new_crtc_state->state;
5999         struct drm_plane *plane;
6000         int num_active = 0;
6001
6002         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6003                 struct drm_plane_state *new_plane_state;
6004
6005                 /* Cursor planes are "fake". */
6006                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6007                         continue;
6008
6009                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6010
6011                 if (!new_plane_state) {
6012                         /*
6013                          * The plane is enable on the CRTC and hasn't changed
6014                          * state. This means that it previously passed
6015                          * validation and is therefore enabled.
6016                          */
6017                         num_active += 1;
6018                         continue;
6019                 }
6020
6021                 /* We need a framebuffer to be considered enabled. */
6022                 num_active += (new_plane_state->fb != NULL);
6023         }
6024
6025         return num_active;
6026 }
6027
6028 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6029                                          struct drm_crtc_state *new_crtc_state)
6030 {
6031         struct dm_crtc_state *dm_new_crtc_state =
6032                 to_dm_crtc_state(new_crtc_state);
6033
6034         dm_new_crtc_state->active_planes = 0;
6035
6036         if (!dm_new_crtc_state->stream)
6037                 return;
6038
6039         dm_new_crtc_state->active_planes =
6040                 count_crtc_active_planes(new_crtc_state);
6041 }
6042
6043 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6044                                        struct drm_atomic_state *state)
6045 {
6046         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6047                                                                           crtc);
6048         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6049         struct dc *dc = adev->dm.dc;
6050         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6051         int ret = -EINVAL;
6052
6053         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6054
6055         dm_update_crtc_active_planes(crtc, crtc_state);
6056
6057         if (unlikely(!dm_crtc_state->stream &&
6058                      modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6059                 WARN_ON(1);
6060                 return ret;
6061         }
6062
6063         /*
6064          * We require the primary plane to be enabled whenever the CRTC is, otherwise
6065          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6066          * planes are disabled, which is not supported by the hardware. And there is legacy
6067          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6068          */
6069         if (crtc_state->enable &&
6070             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6071                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6072                 return -EINVAL;
6073         }
6074
6075         /* In some use cases, like reset, no stream is attached */
6076         if (!dm_crtc_state->stream)
6077                 return 0;
6078
6079         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6080                 return 0;
6081
6082         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6083         return ret;
6084 }
6085
/* No mode fixup needed: DC performs its own timing adjustments, so accept
 * every mode the validation path already approved.
 */
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}
6092
/* CRTC helper vfuncs for the atomic modeset path. */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
6099
/* Intentionally empty: encoder programming is driven entirely by DC, but the
 * encoder helper vtable requires a .disable hook.
 */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}
6104
6105 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6106 {
6107         switch (display_color_depth) {
6108                 case COLOR_DEPTH_666:
6109                         return 6;
6110                 case COLOR_DEPTH_888:
6111                         return 8;
6112                 case COLOR_DEPTH_101010:
6113                         return 10;
6114                 case COLOR_DEPTH_121212:
6115                         return 12;
6116                 case COLOR_DEPTH_141414:
6117                         return 14;
6118                 case COLOR_DEPTH_161616:
6119                         return 16;
6120                 default:
6121                         break;
6122                 }
6123         return 0;
6124 }
6125
/*
 * Encoder .atomic_check hook for MST connectors: computes the PBN
 * (payload bandwidth number) for the new mode and reserves VCPI slots
 * on the MST topology. Returns 0 for non-MST connectors or when nothing
 * relevant changed; a negative error if slot allocation fails.
 */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	/* Only MST connectors (with a port) and active sinks need checking. */
	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	/*
	 * Recompute the PBN only for genuinely new state; a duplicated
	 * state (e.g. for suspend/resume) keeps its previous PBN value.
	 */
	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;
		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
				aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		/* bpp = bits per component * 3 (RGB/YCbCr components). */
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   dm_mst_get_pbn_divider(aconnector->dc_link));
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}
6172
/* Encoder helper vtable; atomic_check handles MST bandwidth reservation. */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
6177
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * For every MST connector in the atomic state, sync its VCPI/PBN
 * allocation with the DSC decision DC made for its stream: DSC-enabled
 * streams get a PBN recomputed from the compressed bpp, others have DSC
 * explicitly disabled on the MST port. Returns 0 or a negative error
 * from drm_dp_mst_atomic_enable_dsc().
 */
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state, *old_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, clock, bpp;
	int vcpi, pbn_div, pbn = 0;

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		/* Skip non-MST connectors. */
		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		/* Find the DC stream belonging to this connector, if any. */
		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		if (stream->timing.flags.DSC != 1) {
			/* No DSC for this stream: keep the existing PBN. */
			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		/* DSC enabled: recompute PBN from the compressed bpp. */
		pbn_div = dm_mst_get_pbn_divider(stream->link);
		bpp = stream->timing.dsc_cfg.bits_per_pixel;
		clock = stream->timing.pix_clk_100hz / 10;
		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
#endif
6242
6243 static void dm_drm_plane_reset(struct drm_plane *plane)
6244 {
6245         struct dm_plane_state *amdgpu_state = NULL;
6246
6247         if (plane->state)
6248                 plane->funcs->atomic_destroy_state(plane, plane->state);
6249
6250         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6251         WARN_ON(amdgpu_state == NULL);
6252
6253         if (amdgpu_state)
6254                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6255 }
6256
6257 static struct drm_plane_state *
6258 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6259 {
6260         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6261
6262         old_dm_plane_state = to_dm_plane_state(plane->state);
6263         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6264         if (!dm_plane_state)
6265                 return NULL;
6266
6267         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6268
6269         if (old_dm_plane_state->dc_state) {
6270                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6271                 dc_plane_state_retain(dm_plane_state->dc_state);
6272         }
6273
6274         return &dm_plane_state->base;
6275 }
6276
6277 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6278                                 struct drm_plane_state *state)
6279 {
6280         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6281
6282         if (dm_plane_state->dc_state)
6283                 dc_plane_state_release(dm_plane_state->dc_state);
6284
6285         drm_atomic_helper_plane_destroy_state(plane, state);
6286 }
6287
/* Plane vtable: atomic helpers plus DM-specific state and modifier hooks. */
static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
	.format_mod_supported = dm_plane_format_mod_supported,
};
6297
/*
 * Plane .prepare_fb hook: reserve and pin the framebuffer BO in a domain
 * the display hardware can scan out from, bind it to GART, record its GPU
 * address, and hold a BO reference for the lifetime of the plane state
 * (released in cleanup_fb). Returns 0 on success or a negative error.
 */
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint32_t domain;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	/* Lock the BO's reservation before pinning. */
	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	/* Cursors must live in VRAM; other planes take any supported domain. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		/* -ERESTARTSYS is a normal signal interruption; stay quiet. */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	/* Hold a reference until cleanup_fb drops it. */
	amdgpu_bo_ref(rbo);

	/**
	 * We don't do surface updates on planes that have been newly created,
	 * but we also don't have the afb->address during atomic check.
	 *
	 * Fill in buffer attributes depending on the address here, but only on
	 * newly created planes since they're not being used by DC yet and this
	 * won't modify global state.
	 */
	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state =
			dm_plane_state_new->dc_state;
		bool force_disable_dcc = !plane_state->dcc.enable;

		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			afb->tiling_flags,
			&plane_state->tiling_info, &plane_state->plane_size,
			&plane_state->dcc, &plane_state->address,
			afb->tmz_surface, force_disable_dcc);
	}

	return 0;
}
6387
6388 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6389                                        struct drm_plane_state *old_state)
6390 {
6391         struct amdgpu_bo *rbo;
6392         int r;
6393
6394         if (!old_state->fb)
6395                 return;
6396
6397         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6398         r = amdgpu_bo_reserve(rbo, false);
6399         if (unlikely(r)) {
6400                 DRM_ERROR("failed to reserve rbo before unpin\n");
6401                 return;
6402         }
6403
6404         amdgpu_bo_unpin(rbo);
6405         amdgpu_bo_unreserve(rbo);
6406         amdgpu_bo_unref(&rbo);
6407 }
6408
6409 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6410                                        struct drm_crtc_state *new_crtc_state)
6411 {
6412         struct drm_framebuffer *fb = state->fb;
6413         int min_downscale, max_upscale;
6414         int min_scale = 0;
6415         int max_scale = INT_MAX;
6416
6417         /* Plane enabled? Get min/max allowed scaling factors from plane caps. */
6418         if (fb && state->crtc) {
6419                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6420                                              &min_downscale, &max_upscale);
6421                 /*
6422                  * Convert to drm convention: 16.16 fixed point, instead of dc's
6423                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6424                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
6425                  */
6426                 min_scale = (1000 << 16) / max_upscale;
6427                 max_scale = (1000 << 16) / min_downscale;
6428         }
6429
6430         return drm_atomic_helper_check_plane_state(
6431                 state, new_crtc_state, min_scale, max_scale, true, true);
6432 }
6433
6434 static int dm_plane_atomic_check(struct drm_plane *plane,
6435                                  struct drm_plane_state *state)
6436 {
6437         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6438         struct dc *dc = adev->dm.dc;
6439         struct dm_plane_state *dm_plane_state;
6440         struct dc_scaling_info scaling_info;
6441         struct drm_crtc_state *new_crtc_state;
6442         int ret;
6443
6444         trace_amdgpu_dm_plane_atomic_check(state);
6445
6446         dm_plane_state = to_dm_plane_state(state);
6447
6448         if (!dm_plane_state->dc_state)
6449                 return 0;
6450
6451         new_crtc_state =
6452                 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6453         if (!new_crtc_state)
6454                 return -EINVAL;
6455
6456         ret = dm_plane_helper_check_state(state, new_crtc_state);
6457         if (ret)
6458                 return ret;
6459
6460         ret = fill_dc_scaling_info(state, &scaling_info);
6461         if (ret)
6462                 return ret;
6463
6464         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6465                 return 0;
6466
6467         return -EINVAL;
6468 }
6469
6470 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6471                                        struct drm_plane_state *new_plane_state)
6472 {
6473         /* Only support async updates on cursor planes. */
6474         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6475                 return -EINVAL;
6476
6477         return 0;
6478 }
6479
6480 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6481                                          struct drm_plane_state *new_state)
6482 {
6483         struct drm_plane_state *old_state =
6484                 drm_atomic_get_old_plane_state(new_state->state, plane);
6485
6486         trace_amdgpu_dm_atomic_update_cursor(new_state);
6487
6488         swap(plane->state->fb, new_state->fb);
6489
6490         plane->state->src_x = new_state->src_x;
6491         plane->state->src_y = new_state->src_y;
6492         plane->state->src_w = new_state->src_w;
6493         plane->state->src_h = new_state->src_h;
6494         plane->state->crtc_x = new_state->crtc_x;
6495         plane->state->crtc_y = new_state->crtc_y;
6496         plane->state->crtc_w = new_state->crtc_w;
6497         plane->state->crtc_h = new_state->crtc_h;
6498
6499         handle_cursor_update(plane, old_state);
6500 }
6501
/* Plane helper vtable: fb prepare/cleanup plus sync and async checks. */
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};
6509
6510 /*
6511  * TODO: these are currently initialized to rgb formats only.
6512  * For future use cases we should either initialize them dynamically based on
6513  * plane capabilities, or initialize this array to all formats, so internal drm
6514  * check will succeed, and let DC implement proper check
6515  */
/* RGB formats advertised for primary planes (see TODO comment above). */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
};
6528
/* Formats advertised for overlay planes. */
static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565
};
6537
/* The cursor plane supports premultiplied ARGB only. */
static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};
6541
6542 static int get_plane_formats(const struct drm_plane *plane,
6543                              const struct dc_plane_cap *plane_cap,
6544                              uint32_t *formats, int max_formats)
6545 {
6546         int i, num_formats = 0;
6547
6548         /*
6549          * TODO: Query support for each group of formats directly from
6550          * DC plane caps. This will require adding more formats to the
6551          * caps list.
6552          */
6553
6554         switch (plane->type) {
6555         case DRM_PLANE_TYPE_PRIMARY:
6556                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6557                         if (num_formats >= max_formats)
6558                                 break;
6559
6560                         formats[num_formats++] = rgb_formats[i];
6561                 }
6562
6563                 if (plane_cap && plane_cap->pixel_format_support.nv12)
6564                         formats[num_formats++] = DRM_FORMAT_NV12;
6565                 if (plane_cap && plane_cap->pixel_format_support.p010)
6566                         formats[num_formats++] = DRM_FORMAT_P010;
6567                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6568                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6569                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6570                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6571                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6572                 }
6573                 break;
6574
6575         case DRM_PLANE_TYPE_OVERLAY:
6576                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6577                         if (num_formats >= max_formats)
6578                                 break;
6579
6580                         formats[num_formats++] = overlay_formats[i];
6581                 }
6582                 break;
6583
6584         case DRM_PLANE_TYPE_CURSOR:
6585                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6586                         if (num_formats >= max_formats)
6587                                 break;
6588
6589                         formats[num_formats++] = cursor_formats[i];
6590                 }
6591                 break;
6592         }
6593
6594         return num_formats;
6595 }
6596
/*
 * Register @plane with DRM: query formats and modifiers, initialize the
 * universal plane, and attach the blend/color/rotation properties the
 * hardware (per @plane_cap) supports. Returns 0 or a negative error.
 */
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;
	unsigned int supported_rotations;
	uint64_t *modifiers = NULL;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	/* get_plane_modifiers allocates; freed after plane init below. */
	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
	if (res)
		return res;

	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       modifiers, plane->type, NULL);
	kfree(modifiers);
	if (res)
		return res;

	/* Per-pixel alpha blending, where the overlay hardware supports it. */
	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	supported_rotations =
		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

	/* Rotation on non-cursor planes, CHIP_BONAIRE and newer only. */
	if (dm->adev->asic_type >= CHIP_BONAIRE &&
	    plane->type != DRM_PLANE_TYPE_CURSOR)
		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
						   supported_rotations);

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}
6663
6664 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6665                                struct drm_plane *plane,
6666                                uint32_t crtc_index)
6667 {
6668         struct amdgpu_crtc *acrtc = NULL;
6669         struct drm_plane *cursor_plane;
6670
6671         int res = -ENOMEM;
6672
6673         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6674         if (!cursor_plane)
6675                 goto fail;
6676
6677         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6678         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6679
6680         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6681         if (!acrtc)
6682                 goto fail;
6683
6684         res = drm_crtc_init_with_planes(
6685                         dm->ddev,
6686                         &acrtc->base,
6687                         plane,
6688                         cursor_plane,
6689                         &amdgpu_dm_crtc_funcs, NULL);
6690
6691         if (res)
6692                 goto fail;
6693
6694         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6695
6696         /* Create (reset) the plane state */
6697         if (acrtc->base.funcs->reset)
6698                 acrtc->base.funcs->reset(&acrtc->base);
6699
6700         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6701         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6702
6703         acrtc->crtc_id = crtc_index;
6704         acrtc->base.enabled = false;
6705         acrtc->otg_inst = -1;
6706
6707         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6708         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6709                                    true, MAX_COLOR_LUT_ENTRIES);
6710         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6711
6712         return 0;
6713
6714 fail:
6715         kfree(acrtc);
6716         kfree(cursor_plane);
6717         return res;
6718 }
6719
6720
6721 static int to_drm_connector_type(enum signal_type st)
6722 {
6723         switch (st) {
6724         case SIGNAL_TYPE_HDMI_TYPE_A:
6725                 return DRM_MODE_CONNECTOR_HDMIA;
6726         case SIGNAL_TYPE_EDP:
6727                 return DRM_MODE_CONNECTOR_eDP;
6728         case SIGNAL_TYPE_LVDS:
6729                 return DRM_MODE_CONNECTOR_LVDS;
6730         case SIGNAL_TYPE_RGB:
6731                 return DRM_MODE_CONNECTOR_VGA;
6732         case SIGNAL_TYPE_DISPLAY_PORT:
6733         case SIGNAL_TYPE_DISPLAY_PORT_MST:
6734                 return DRM_MODE_CONNECTOR_DisplayPort;
6735         case SIGNAL_TYPE_DVI_DUAL_LINK:
6736         case SIGNAL_TYPE_DVI_SINGLE_LINK:
6737                 return DRM_MODE_CONNECTOR_DVID;
6738         case SIGNAL_TYPE_VIRTUAL:
6739                 return DRM_MODE_CONNECTOR_VIRTUAL;
6740
6741         default:
6742                 return DRM_MODE_CONNECTOR_Unknown;
6743         }
6744 }
6745
/*
 * Return the encoder attached to @connector, or NULL if none. The loop
 * returns on the first candidate because each DM connector has exactly
 * one possible encoder.
 */
static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}
6756
/*
 * Cache the connector's preferred (native) mode in its encoder, to be
 * used later as the base for synthesized common modes. Resets
 * native_mode.clock to 0 first so "no native mode found" is detectable.
 */
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			/*
			 * NOTE(review): this break is unconditional, so only
			 * the first probed mode is ever examined. The caller
			 * sorts probed_modes beforehand, which presumably
			 * puts the wanted mode first — confirm intent.
			 */
			break;
		}

	}
}
6785
6786 static struct drm_display_mode *
6787 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6788                              char *name,
6789                              int hdisplay, int vdisplay)
6790 {
6791         struct drm_device *dev = encoder->dev;
6792         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6793         struct drm_display_mode *mode = NULL;
6794         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6795
6796         mode = drm_mode_duplicate(dev, native_mode);
6797
6798         if (mode == NULL)
6799                 return NULL;
6800
6801         mode->hdisplay = hdisplay;
6802         mode->vdisplay = vdisplay;
6803         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6804         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6805
6806         return mode;
6807
6808 }
6809
6810 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6811                                                  struct drm_connector *connector)
6812 {
6813         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6814         struct drm_display_mode *mode = NULL;
6815         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6816         struct amdgpu_dm_connector *amdgpu_dm_connector =
6817                                 to_amdgpu_dm_connector(connector);
6818         int i;
6819         int n;
6820         struct mode_size {
6821                 char name[DRM_DISPLAY_MODE_LEN];
6822                 int w;
6823                 int h;
6824         } common_modes[] = {
6825                 {  "640x480",  640,  480},
6826                 {  "800x600",  800,  600},
6827                 { "1024x768", 1024,  768},
6828                 { "1280x720", 1280,  720},
6829                 { "1280x800", 1280,  800},
6830                 {"1280x1024", 1280, 1024},
6831                 { "1440x900", 1440,  900},
6832                 {"1680x1050", 1680, 1050},
6833                 {"1600x1200", 1600, 1200},
6834                 {"1920x1080", 1920, 1080},
6835                 {"1920x1200", 1920, 1200}
6836         };
6837
6838         n = ARRAY_SIZE(common_modes);
6839
6840         for (i = 0; i < n; i++) {
6841                 struct drm_display_mode *curmode = NULL;
6842                 bool mode_existed = false;
6843
6844                 if (common_modes[i].w > native_mode->hdisplay ||
6845                     common_modes[i].h > native_mode->vdisplay ||
6846                    (common_modes[i].w == native_mode->hdisplay &&
6847                     common_modes[i].h == native_mode->vdisplay))
6848                         continue;
6849
6850                 list_for_each_entry(curmode, &connector->probed_modes, head) {
6851                         if (common_modes[i].w == curmode->hdisplay &&
6852                             common_modes[i].h == curmode->vdisplay) {
6853                                 mode_existed = true;
6854                                 break;
6855                         }
6856                 }
6857
6858                 if (mode_existed)
6859                         continue;
6860
6861                 mode = amdgpu_dm_create_common_mode(encoder,
6862                                 common_modes[i].name, common_modes[i].w,
6863                                 common_modes[i].h);
6864                 drm_mode_probed_add(connector, mode);
6865                 amdgpu_dm_connector->num_modes++;
6866         }
6867 }
6868
6869 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6870                                               struct edid *edid)
6871 {
6872         struct amdgpu_dm_connector *amdgpu_dm_connector =
6873                         to_amdgpu_dm_connector(connector);
6874
6875         if (edid) {
6876                 /* empty probed_modes */
6877                 INIT_LIST_HEAD(&connector->probed_modes);
6878                 amdgpu_dm_connector->num_modes =
6879                                 drm_add_edid_modes(connector, edid);
6880
6881                 /* sorting the probed modes before calling function
6882                  * amdgpu_dm_get_native_mode() since EDID can have
6883                  * more than one preferred mode. The modes that are
6884                  * later in the probed mode list could be of higher
6885                  * and preferred resolution. For example, 3840x2160
6886                  * resolution in base EDID preferred timing and 4096x2160
6887                  * preferred resolution in DID extension block later.
6888                  */
6889                 drm_mode_sort(&connector->probed_modes);
6890                 amdgpu_dm_get_native_mode(connector);
6891         } else {
6892                 amdgpu_dm_connector->num_modes = 0;
6893         }
6894 }
6895
6896 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6897 {
6898         struct amdgpu_dm_connector *amdgpu_dm_connector =
6899                         to_amdgpu_dm_connector(connector);
6900         struct drm_encoder *encoder;
6901         struct edid *edid = amdgpu_dm_connector->edid;
6902
6903         encoder = amdgpu_dm_connector_to_encoder(connector);
6904
6905         if (!drm_edid_is_valid(edid)) {
6906                 amdgpu_dm_connector->num_modes =
6907                                 drm_add_modes_noedid(connector, 640, 480);
6908         } else {
6909                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6910                 amdgpu_dm_connector_add_common_modes(encoder, connector);
6911         }
6912         amdgpu_dm_fbc_init(connector);
6913
6914         return amdgpu_dm_connector->num_modes;
6915 }
6916
/*
 * Common initialization for a DM connector: ties it to its dc_link, sets
 * HPD polling per connector type, and attaches the DRM properties this
 * connector supports (scaling, underscan, max bpc, ABM level, HDR
 * metadata, VRR capability, content protection).
 */
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	/* NOTE(review): MST connectors skip max_bpc here — presumably the
	 * MST port attaches its own; confirm against the MST init path. */
	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	/* ABM level is only exposed on eDP when DMCU is initialized or a
	 * DMUB service is available. */
	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	/* HDR metadata, VRR and HDCP properties apply to HDMI/DP/eDP only. */
	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}
7007
7008 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7009                               struct i2c_msg *msgs, int num)
7010 {
7011         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7012         struct ddc_service *ddc_service = i2c->ddc_service;
7013         struct i2c_command cmd;
7014         int i;
7015         int result = -EIO;
7016
7017         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7018
7019         if (!cmd.payloads)
7020                 return result;
7021
7022         cmd.number_of_payloads = num;
7023         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7024         cmd.speed = 100;
7025
7026         for (i = 0; i < num; i++) {
7027                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7028                 cmd.payloads[i].address = msgs[i].addr;
7029                 cmd.payloads[i].length = msgs[i].len;
7030                 cmd.payloads[i].data = msgs[i].buf;
7031         }
7032
7033         if (dc_submit_i2c(
7034                         ddc_service->ctx->dc,
7035                         ddc_service->ddc_pin->hw_info.ddc_channel,
7036                         &cmd))
7037                 result = num;
7038
7039         kfree(cmd.payloads);
7040         return result;
7041 }
7042
/* Advertise plain I2C transfers plus emulated SMBus commands. */
static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
7047
/* I2C algorithm backed by DC's DDC engine (see amdgpu_dm_i2c_xfer). */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
7052
7053 static struct amdgpu_i2c_adapter *
7054 create_i2c(struct ddc_service *ddc_service,
7055            int link_index,
7056            int *res)
7057 {
7058         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7059         struct amdgpu_i2c_adapter *i2c;
7060
7061         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7062         if (!i2c)
7063                 return NULL;
7064         i2c->base.owner = THIS_MODULE;
7065         i2c->base.class = I2C_CLASS_DDC;
7066         i2c->base.dev.parent = &adev->pdev->dev;
7067         i2c->base.algo = &amdgpu_dm_i2c_algo;
7068         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7069         i2c_set_adapdata(&i2c->base, i2c);
7070         i2c->ddc_service = ddc_service;
7071         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7072
7073         return i2c;
7074 }
7075
7076
/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 *
 * Creates and registers the i2c adapter for the link's DDC line, then
 * initializes the DRM connector of the matching type and attaches it to
 * @aencoder. Returns 0 on success or a negative errno; on failure the
 * i2c adapter is freed and aconnector->i2c is reset to NULL.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	/* Let the dc_link find its way back to this connector. */
	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	/* DP/eDP connectors get additional DP-specific initialization. */
	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	/* The success path also falls through; only clean up on error. */
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}
7150
7151 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7152 {
7153         switch (adev->mode_info.num_crtc) {
7154         case 1:
7155                 return 0x1;
7156         case 2:
7157                 return 0x3;
7158         case 3:
7159                 return 0x7;
7160         case 4:
7161                 return 0xf;
7162         case 5:
7163                 return 0x1f;
7164         case 6:
7165         default:
7166                 return 0x3f;
7167         }
7168 }
7169
7170 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7171                                   struct amdgpu_encoder *aencoder,
7172                                   uint32_t link_index)
7173 {
7174         struct amdgpu_device *adev = drm_to_adev(dev);
7175
7176         int res = drm_encoder_init(dev,
7177                                    &aencoder->base,
7178                                    &amdgpu_dm_encoder_funcs,
7179                                    DRM_MODE_ENCODER_TMDS,
7180                                    NULL);
7181
7182         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7183
7184         if (!res)
7185                 aencoder->encoder_id = link_index;
7186         else
7187                 aencoder->encoder_id = -1;
7188
7189         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7190
7191         return res;
7192 }
7193
/*
 * Enable or disable vblank/pageflip interrupt handling for @acrtc.
 * On enable, DRM vblank handling is turned on before the pageflip irq
 * reference is taken; on disable, the irq reference is dropped before
 * vblank handling is turned off.
 */
static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
	} else {

		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}
7224
/*
 * Re-apply the currently tracked pageflip interrupt state for @acrtc
 * to the hardware.
 */
static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
{
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/**
	 * This reads the current state for the IRQ and force reapplies
	 * the setting to hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}
7237
7238 static bool
7239 is_scaling_state_different(const struct dm_connector_state *dm_state,
7240                            const struct dm_connector_state *old_dm_state)
7241 {
7242         if (dm_state->scaling != old_dm_state->scaling)
7243                 return true;
7244         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7245                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7246                         return true;
7247         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7248                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7249                         return true;
7250         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7251                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7252                 return true;
7253         return false;
7254 }
7255
7256 #ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * Decide whether the HDCP state machine needs to run for this commit,
 * based on the old->new content_protection transition. May normalize
 * state->content_protection in place for transitional cases (see the
 * per-case comments). Returns true when HDCP handling is required.
 *
 * NOTE(review): hdcp_w is currently unused in this function.
 */
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	/* Handle: Type0/1 change */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re enabled, ignore this
	 *
	 * Handles:	ENABLED -> DESIRED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
	 *
	 * Handles:	UNDESIRED -> ENABLED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
	 * hot-plug, headless s3, dpms
	 *
	 * Handles:	DESIRED -> DESIRED (Special case)
	 */
	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/*
	 * Handles:	UNDESIRED -> UNDESIRED
	 *		DESIRED -> DESIRED
	 *		ENABLED -> ENABLED
	 */
	if (old_state->content_protection == state->content_protection)
		return false;

	/*
	 * Handles:	UNDESIRED -> DESIRED
	 *		DESIRED -> UNDESIRED
	 *		ENABLED -> UNDESIRED
	 */
	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
		return true;

	/*
	 * Handles:	DESIRED -> ENABLED
	 */
	return false;
}
7321
7322 #endif
7323 static void remove_stream(struct amdgpu_device *adev,
7324                           struct amdgpu_crtc *acrtc,
7325                           struct dc_stream_state *stream)
7326 {
7327         /* this is the update mode case */
7328
7329         acrtc->otg_inst = -1;
7330         acrtc->enabled = false;
7331 }
7332
/*
 * Translate the cursor plane's DRM state into a DC cursor position.
 *
 * A cursor partially off the top/left edge has its negative coordinates
 * clamped to 0 and compensated through the hotspot so the visible part
 * stays put. Returns 0 on success (position->enable says whether the
 * cursor should be shown) or -EINVAL for an oversized cursor.
 */
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	position->enable = false;
	position->x = 0;
	position->y = 0;

	/* No CRTC or no framebuffer: report a disabled cursor. */
	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	/* Entirely off-screen to the top/left: leave the cursor disabled. */
	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	/* Clamp to the screen edge; move the hotspot to compensate. */
	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}
7380
/*
 * Program the hardware cursor for a cursor-plane update: compute the DC
 * cursor position (disabling the cursor when off-screen or without a
 * framebuffer) and, when enabled, push position and attributes to the
 * CRTC's DC stream under the dc_lock.
 */
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	/* Fall back to the old state's CRTC when the plane is being disabled. */
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	/* Neither old nor new state has a framebuffer: nothing to do. */
	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	/* Convert the framebuffer pitch from bytes to pixels. */
	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
							 &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}
7444
/*
 * Move the CRTC's pending pageflip event into acrtc->event and mark the
 * flip as submitted. Caller must hold the DRM event_lock (asserted).
 */
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
						 acrtc->crtc_id);
}
7462
/*
 * After a flip, rebuild the VRR (freesync) info packet and timing
 * adjustment for @new_stream and publish them to the stream, the CRTC
 * state and the CRTC's dm_irq_params (shared with the IRQ handlers).
 * Also records whether the timing or infopacket changed.
 */
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	/* event_lock protects dm_irq_params shared with the IRQ handlers. */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket);

	/* Track whether timing/infopacket changed vs. what was cached. */
	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	acrtc->dm_irq_params.vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
7542
/*
 * Recompute the freesync/VRR parameters for @new_crtc_state's stream and
 * cache them — along with the freesync config and active plane count —
 * in the CRTC's dm_irq_params for use by the DM IRQ handlers.
 */
static void update_stream_irq_parameters(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	/* event_lock protects dm_irq_params shared with the IRQ handlers. */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	/* VRR is usable only with a valid refresh range; pick the state. */
	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_config = config;
	/* Copy state for access from DM IRQ handler */
	acrtc->dm_irq_params.freesync_config = config;
	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
	acrtc->dm_irq_params.vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
7592
/*
 * Handle a VRR enable/disable transition for a CRTC: take (or drop) a
 * vblank reference and the vupdate irq so the vblank irq stays enabled
 * while VRR is active (see the inline comments for why).
 */
static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if it likely happened inside display front-porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	}
}
7622
/*
 * Apply all cursor-plane updates in an atomic @state by programming the
 * DC cursor for each cursor plane via handle_cursor_update().
 */
static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			handle_cursor_update(plane, old_plane_state);
}
7638
/**
 * amdgpu_dm_commit_planes - program plane updates and page flips for one CRTC
 * @state: overall atomic state being committed
 * @dc_state: DC state the surface updates are committed against
 * @dev: DRM device
 * @dm: display manager owning the DC instance and its locks
 * @pcrtc: the CRTC whose planes are being committed
 * @wait_for_vblank: when true, throttle the flip by one extra vblank
 *
 * Collects all non-cursor plane state for @pcrtc into a heap-allocated
 * update "bundle" (surface/plane/scaling/flip-address updates plus one
 * stream update), waits for pending fences on flipped framebuffers,
 * optionally throttles flip timing against the vblank counter, arms the
 * pageflip interrupt, and finally hands the bundle to DC via
 * dc_commit_updates_for_stream(). Cursor planes are committed separately
 * before/after the stream update depending on whether all planes are
 * being disabled.
 */
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	uint32_t i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	/*
	 * Scratch space for up to MAX_SURFACES per-plane updates plus the
	 * single per-stream update. Too large for the kernel stack, hence
	 * heap-allocated below.
	 */
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* Only planes with a framebuffer attached to this CRTC. */
		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		/* A flip is only needed when both an old and a new fb exist. */
		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
							false,
							msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state,
			afb->tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			afb->tmz_surface, false);

		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		/* NULL dc_plane means no DC surface to flip — skip it. */
		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
					acrtc_attach->crtc_id);
			continue;
		}

		/* Freesync bookkeeping is keyed off the primary plane only. */
		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;

	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		}
		else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		/* wait_for_vblank adds at most one vblank of throttling. */
		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}

		/**
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (eg. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
		if (acrtc_attach->base.state->event &&
		    acrtc_state->active_planes > 0) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		/* Refresh the VRR infopacket alongside the flip if it changed. */
		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
		acrtc_state->stream) {
		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
		    amdgpu_dm_vrr_active(acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_attach->dm_irq_params.vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		/* PSR must be off while programming a non-fast update. */
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
						     bundle->surface_updates,
						     planes_count,
						     acrtc_state->stream,
						     &bundle->stream_update,
						     dc_state);

		/**
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * When power gating is enabled on a pipe we lose the
		 * interrupt enablement state when power gating is disabled.
		 *
		 * So we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could be previously
		 * power gated) or off (since some pipes can't be power gated
		 * on some ASICs).
		 */
		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
			dm_update_pflip_irq_state(drm_to_adev(dev),
						  acrtc_attach);

		/*
		 * Set up PSR once after a full update if the link supports it
		 * but it hasn't been enabled yet; (re)enable PSR on fast
		 * updates once the feature is enabled.
		 */
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}
7954
7955 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7956                                    struct drm_atomic_state *state)
7957 {
7958         struct amdgpu_device *adev = drm_to_adev(dev);
7959         struct amdgpu_dm_connector *aconnector;
7960         struct drm_connector *connector;
7961         struct drm_connector_state *old_con_state, *new_con_state;
7962         struct drm_crtc_state *new_crtc_state;
7963         struct dm_crtc_state *new_dm_crtc_state;
7964         const struct dc_stream_status *status;
7965         int i, inst;
7966
7967         /* Notify device removals. */
7968         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7969                 if (old_con_state->crtc != new_con_state->crtc) {
7970                         /* CRTC changes require notification. */
7971                         goto notify;
7972                 }
7973
7974                 if (!new_con_state->crtc)
7975                         continue;
7976
7977                 new_crtc_state = drm_atomic_get_new_crtc_state(
7978                         state, new_con_state->crtc);
7979
7980                 if (!new_crtc_state)
7981                         continue;
7982
7983                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7984                         continue;
7985
7986         notify:
7987                 aconnector = to_amdgpu_dm_connector(connector);
7988
7989                 mutex_lock(&adev->dm.audio_lock);
7990                 inst = aconnector->audio_inst;
7991                 aconnector->audio_inst = -1;
7992                 mutex_unlock(&adev->dm.audio_lock);
7993
7994                 amdgpu_dm_audio_eld_notify(adev, inst);
7995         }
7996
7997         /* Notify audio device additions. */
7998         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7999                 if (!new_con_state->crtc)
8000                         continue;
8001
8002                 new_crtc_state = drm_atomic_get_new_crtc_state(
8003                         state, new_con_state->crtc);
8004
8005                 if (!new_crtc_state)
8006                         continue;
8007
8008                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8009                         continue;
8010
8011                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8012                 if (!new_dm_crtc_state->stream)
8013                         continue;
8014
8015                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8016                 if (!status)
8017                         continue;
8018
8019                 aconnector = to_amdgpu_dm_connector(connector);
8020
8021                 mutex_lock(&adev->dm.audio_lock);
8022                 inst = status->audio_inst;
8023                 aconnector->audio_inst = inst;
8024                 mutex_unlock(&adev->dm.audio_lock);
8025
8026                 amdgpu_dm_audio_eld_notify(adev, inst);
8027         }
8028 }
8029
8030 /*
8031  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8032  * @crtc_state: the DRM CRTC state
8033  * @stream_state: the DC stream state.
8034  *
8035  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8036  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8037  */
8038 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8039                                                 struct dc_stream_state *stream_state)
8040 {
8041         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8042 }
8043
8044 /**
8045  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8046  * @state: The atomic state to commit
8047  *
8048  * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
8050  * atomic check should have filtered anything non-kosher.
8051  */
8052 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8053 {
8054         struct drm_device *dev = state->dev;
8055         struct amdgpu_device *adev = drm_to_adev(dev);
8056         struct amdgpu_display_manager *dm = &adev->dm;
8057         struct dm_atomic_state *dm_state;
8058         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8059         uint32_t i, j;
8060         struct drm_crtc *crtc;
8061         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8062         unsigned long flags;
8063         bool wait_for_vblank = true;
8064         struct drm_connector *connector;
8065         struct drm_connector_state *old_con_state, *new_con_state;
8066         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8067         int crtc_disable_count = 0;
8068         bool mode_set_reset_required = false;
8069
8070         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8071
8072         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8073
8074         dm_state = dm_atomic_get_new_state(state);
8075         if (dm_state && dm_state->context) {
8076                 dc_state = dm_state->context;
8077         } else {
8078                 /* No state changes, retain current state. */
8079                 dc_state_temp = dc_create_state(dm->dc);
8080                 ASSERT(dc_state_temp);
8081                 dc_state = dc_state_temp;
8082                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8083         }
8084
8085         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8086                                        new_crtc_state, i) {
8087                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8088
8089                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8090
8091                 if (old_crtc_state->active &&
8092                     (!new_crtc_state->active ||
8093                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8094                         manage_dm_interrupts(adev, acrtc, false);
8095                         dc_stream_release(dm_old_crtc_state->stream);
8096                 }
8097         }
8098
8099         drm_atomic_helper_calc_timestamping_constants(state);
8100
8101         /* update changed items */
8102         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8103                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8104
8105                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8106                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8107
8108                 DRM_DEBUG_DRIVER(
8109                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8110                         "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8111                         "connectors_changed:%d\n",
8112                         acrtc->crtc_id,
8113                         new_crtc_state->enable,
8114                         new_crtc_state->active,
8115                         new_crtc_state->planes_changed,
8116                         new_crtc_state->mode_changed,
8117                         new_crtc_state->active_changed,
8118                         new_crtc_state->connectors_changed);
8119
8120                 /* Disable cursor if disabling crtc */
8121                 if (old_crtc_state->active && !new_crtc_state->active) {
8122                         struct dc_cursor_position position;
8123
8124                         memset(&position, 0, sizeof(position));
8125                         mutex_lock(&dm->dc_lock);
8126                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8127                         mutex_unlock(&dm->dc_lock);
8128                 }
8129
8130                 /* Copy all transient state flags into dc state */
8131                 if (dm_new_crtc_state->stream) {
8132                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8133                                                             dm_new_crtc_state->stream);
8134                 }
8135
8136                 /* handles headless hotplug case, updating new_state and
8137                  * aconnector as needed
8138                  */
8139
8140                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8141
8142                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8143
8144                         if (!dm_new_crtc_state->stream) {
8145                                 /*
8146                                  * this could happen because of issues with
8147                                  * userspace notifications delivery.
8148                                  * In this case userspace tries to set mode on
8149                                  * display which is disconnected in fact.
8150                                  * dc_sink is NULL in this case on aconnector.
8151                                  * We expect reset mode will come soon.
8152                                  *
8153                                  * This can also happen when unplug is done
8154                                  * during resume sequence ended
8155                                  *
8156                                  * In this case, we want to pretend we still
8157                                  * have a sink to keep the pipe running so that
8158                                  * hw state is consistent with the sw state
8159                                  */
8160                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8161                                                 __func__, acrtc->base.base.id);
8162                                 continue;
8163                         }
8164
8165                         if (dm_old_crtc_state->stream)
8166                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8167
8168                         pm_runtime_get_noresume(dev->dev);
8169
8170                         acrtc->enabled = true;
8171                         acrtc->hw_mode = new_crtc_state->mode;
8172                         crtc->hwmode = new_crtc_state->mode;
8173                         mode_set_reset_required = true;
8174                 } else if (modereset_required(new_crtc_state)) {
8175                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8176                         /* i.e. reset mode */
8177                         if (dm_old_crtc_state->stream)
8178                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8179                         mode_set_reset_required = true;
8180                 }
8181         } /* for_each_crtc_in_state() */
8182
8183         if (dc_state) {
8184                 /* if there mode set or reset, disable eDP PSR */
8185                 if (mode_set_reset_required)
8186                         amdgpu_dm_psr_disable_all(dm);
8187
8188                 dm_enable_per_frame_crtc_master_sync(dc_state);
8189                 mutex_lock(&dm->dc_lock);
8190                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8191                 mutex_unlock(&dm->dc_lock);
8192         }
8193
8194         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8195                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8196
8197                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8198
8199                 if (dm_new_crtc_state->stream != NULL) {
8200                         const struct dc_stream_status *status =
8201                                         dc_stream_get_status(dm_new_crtc_state->stream);
8202
8203                         if (!status)
8204                                 status = dc_stream_get_status_from_state(dc_state,
8205                                                                          dm_new_crtc_state->stream);
8206                         if (!status)
8207                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8208                         else
8209                                 acrtc->otg_inst = status->primary_otg_inst;
8210                 }
8211         }
8212 #ifdef CONFIG_DRM_AMD_DC_HDCP
8213         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8214                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8215                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8216                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8217
8218                 new_crtc_state = NULL;
8219
8220                 if (acrtc)
8221                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8222
8223                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8224
8225                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8226                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8227                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8228                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8229                         dm_new_con_state->update_hdcp = true;
8230                         continue;
8231                 }
8232
8233                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8234                         hdcp_update_display(
8235                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8236                                 new_con_state->hdcp_content_type,
8237                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8238                                                                                                          : false);
8239         }
8240 #endif
8241
8242         /* Handle connector state changes */
8243         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8244                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8245                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8246                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8247                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8248                 struct dc_stream_update stream_update;
8249                 struct dc_info_packet hdr_packet;
8250                 struct dc_stream_status *status = NULL;
8251                 bool abm_changed, hdr_changed, scaling_changed;
8252
8253                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8254                 memset(&stream_update, 0, sizeof(stream_update));
8255
8256                 if (acrtc) {
8257                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8258                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8259                 }
8260
8261                 /* Skip any modesets/resets */
8262                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8263                         continue;
8264
8265                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8266                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8267
8268                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8269                                                              dm_old_con_state);
8270
8271                 abm_changed = dm_new_crtc_state->abm_level !=
8272                               dm_old_crtc_state->abm_level;
8273
8274                 hdr_changed =
8275                         is_hdr_metadata_different(old_con_state, new_con_state);
8276
8277                 if (!scaling_changed && !abm_changed && !hdr_changed)
8278                         continue;
8279
8280                 stream_update.stream = dm_new_crtc_state->stream;
8281                 if (scaling_changed) {
8282                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8283                                         dm_new_con_state, dm_new_crtc_state->stream);
8284
8285                         stream_update.src = dm_new_crtc_state->stream->src;
8286                         stream_update.dst = dm_new_crtc_state->stream->dst;
8287                 }
8288
8289                 if (abm_changed) {
8290                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8291
8292                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8293                 }
8294
8295                 if (hdr_changed) {
8296                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8297                         stream_update.hdr_static_metadata = &hdr_packet;
8298                 }
8299
8300                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8301                 WARN_ON(!status);
8302                 WARN_ON(!status->plane_count);
8303
8304                 /*
8305                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8306                  * Here we create an empty update on each plane.
8307                  * To fix this, DC should permit updating only stream properties.
8308                  */
8309                 for (j = 0; j < status->plane_count; j++)
8310                         dummy_updates[j].surface = status->plane_states[0];
8311
8312
8313                 mutex_lock(&dm->dc_lock);
8314                 dc_commit_updates_for_stream(dm->dc,
8315                                                      dummy_updates,
8316                                                      status->plane_count,
8317                                                      dm_new_crtc_state->stream,
8318                                                      &stream_update,
8319                                                      dc_state);
8320                 mutex_unlock(&dm->dc_lock);
8321         }
8322
8323         /* Count number of newly disabled CRTCs for dropping PM refs later. */
8324         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8325                                       new_crtc_state, i) {
8326                 if (old_crtc_state->active && !new_crtc_state->active)
8327                         crtc_disable_count++;
8328
8329                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8330                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8331
8332                 /* For freesync config update on crtc state and params for irq */
8333                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8334
8335                 /* Handle vrr on->off / off->on transitions */
8336                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8337                                                 dm_new_crtc_state);
8338         }
8339
8340         /**
8341          * Enable interrupts for CRTCs that are newly enabled or went through
8342          * a modeset. It was intentionally deferred until after the front end
8343          * state was modified to wait until the OTG was on and so the IRQ
8344          * handlers didn't access stale or invalid state.
8345          */
8346         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8347                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8348
8349                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8350
8351                 if (new_crtc_state->active &&
8352                     (!old_crtc_state->active ||
8353                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8354                         dc_stream_retain(dm_new_crtc_state->stream);
8355                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8356                         manage_dm_interrupts(adev, acrtc, true);
8357
8358 #ifdef CONFIG_DEBUG_FS
8359                         /**
8360                          * Frontend may have changed so reapply the CRC capture
8361                          * settings for the stream.
8362                          */
8363                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8364
8365                         if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8366                                 amdgpu_dm_crtc_configure_crc_source(
8367                                         crtc, dm_new_crtc_state,
8368                                         dm_new_crtc_state->crc_src);
8369                         }
8370 #endif
8371                 }
8372         }
8373
8374         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8375                 if (new_crtc_state->async_flip)
8376                         wait_for_vblank = false;
8377
8378         /* update planes when needed per crtc*/
8379         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8380                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8381
8382                 if (dm_new_crtc_state->stream)
8383                         amdgpu_dm_commit_planes(state, dc_state, dev,
8384                                                 dm, crtc, wait_for_vblank);
8385         }
8386
8387         /* Update audio instances for each connector. */
8388         amdgpu_dm_commit_audio(dev, state);
8389
8390         /*
8391          * send vblank event on all events not handled in flip and
8392          * mark consumed event for drm_atomic_helper_commit_hw_done
8393          */
8394         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8395         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8396
8397                 if (new_crtc_state->event)
8398                         drm_send_event_locked(dev, &new_crtc_state->event->base);
8399
8400                 new_crtc_state->event = NULL;
8401         }
8402         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8403
8404         /* Signal HW programming completion */
8405         drm_atomic_helper_commit_hw_done(state);
8406
8407         if (wait_for_vblank)
8408                 drm_atomic_helper_wait_for_flip_done(dev, state);
8409
8410         drm_atomic_helper_cleanup_planes(dev, state);
8411
8412         /* return the stolen vga memory back to VRAM */
8413         if (!adev->mman.keep_stolen_vga_memory)
8414                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8415         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8416
8417         /*
8418          * Finally, drop a runtime PM reference for each newly disabled CRTC,
8419          * so we can put the GPU into runtime suspend if we're not driving any
8420          * displays anymore
8421          */
8422         for (i = 0; i < crtc_disable_count; i++)
8423                 pm_runtime_put_autosuspend(dev->dev);
8424         pm_runtime_mark_last_busy(dev->dev);
8425
8426         if (dc_state_temp)
8427                 dc_release_state(dc_state_temp);
8428 }
8429
8430
8431 static int dm_force_atomic_commit(struct drm_connector *connector)
8432 {
8433         int ret = 0;
8434         struct drm_device *ddev = connector->dev;
8435         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8436         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8437         struct drm_plane *plane = disconnected_acrtc->base.primary;
8438         struct drm_connector_state *conn_state;
8439         struct drm_crtc_state *crtc_state;
8440         struct drm_plane_state *plane_state;
8441
8442         if (!state)
8443                 return -ENOMEM;
8444
8445         state->acquire_ctx = ddev->mode_config.acquire_ctx;
8446
8447         /* Construct an atomic state to restore previous display setting */
8448
8449         /*
8450          * Attach connectors to drm_atomic_state
8451          */
8452         conn_state = drm_atomic_get_connector_state(state, connector);
8453
8454         ret = PTR_ERR_OR_ZERO(conn_state);
8455         if (ret)
8456                 goto err;
8457
8458         /* Attach crtc to drm_atomic_state*/
8459         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8460
8461         ret = PTR_ERR_OR_ZERO(crtc_state);
8462         if (ret)
8463                 goto err;
8464
8465         /* force a restore */
8466         crtc_state->mode_changed = true;
8467
8468         /* Attach plane to drm_atomic_state */
8469         plane_state = drm_atomic_get_plane_state(state, plane);
8470
8471         ret = PTR_ERR_OR_ZERO(plane_state);
8472         if (ret)
8473                 goto err;
8474
8475
8476         /* Call commit internally with the state we just constructed */
8477         ret = drm_atomic_commit(state);
8478         if (!ret)
8479                 return 0;
8480
8481 err:
8482         DRM_ERROR("Restoring old state failed with %i\n", ret);
8483         drm_atomic_state_put(state);
8484
8485         return ret;
8486 }
8487
8488 /*
8489  * This function handles all cases when set mode does not come upon hotplug.
8490  * This includes when a display is unplugged then plugged back into the
8491  * same port and when running without usermode desktop manager supprot
8492  */
8493 void dm_restore_drm_connector_state(struct drm_device *dev,
8494                                     struct drm_connector *connector)
8495 {
8496         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8497         struct amdgpu_crtc *disconnected_acrtc;
8498         struct dm_crtc_state *acrtc_state;
8499
8500         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8501                 return;
8502
8503         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8504         if (!disconnected_acrtc)
8505                 return;
8506
8507         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8508         if (!acrtc_state->stream)
8509                 return;
8510
8511         /*
8512          * If the previous sink is not released and different from the current,
8513          * we deduce we are in a state where we can not rely on usermode call
8514          * to turn on the display, so we do it here
8515          */
8516         if (acrtc_state->stream->sink != aconnector->dc_sink)
8517                 dm_force_atomic_commit(&aconnector->base);
8518 }
8519
8520 /*
8521  * Grabs all modesetting locks to serialize against any blocking commits,
8522  * Waits for completion of all non blocking commits.
8523  */
8524 static int do_aquire_global_lock(struct drm_device *dev,
8525                                  struct drm_atomic_state *state)
8526 {
8527         struct drm_crtc *crtc;
8528         struct drm_crtc_commit *commit;
8529         long ret;
8530
8531         /*
8532          * Adding all modeset locks to aquire_ctx will
8533          * ensure that when the framework release it the
8534          * extra locks we are locking here will get released to
8535          */
8536         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8537         if (ret)
8538                 return ret;
8539
8540         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8541                 spin_lock(&crtc->commit_lock);
8542                 commit = list_first_entry_or_null(&crtc->commit_list,
8543                                 struct drm_crtc_commit, commit_entry);
8544                 if (commit)
8545                         drm_crtc_commit_get(commit);
8546                 spin_unlock(&crtc->commit_lock);
8547
8548                 if (!commit)
8549                         continue;
8550
8551                 /*
8552                  * Make sure all pending HW programming completed and
8553                  * page flips done
8554                  */
8555                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8556
8557                 if (ret > 0)
8558                         ret = wait_for_completion_interruptible_timeout(
8559                                         &commit->flip_done, 10*HZ);
8560
8561                 if (ret == 0)
8562                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8563                                   "timed out\n", crtc->base.id, crtc->name);
8564
8565                 drm_crtc_commit_put(commit);
8566         }
8567
8568         return ret < 0 ? ret : 0;
8569 }
8570
8571 static void get_freesync_config_for_crtc(
8572         struct dm_crtc_state *new_crtc_state,
8573         struct dm_connector_state *new_con_state)
8574 {
8575         struct mod_freesync_config config = {0};
8576         struct amdgpu_dm_connector *aconnector =
8577                         to_amdgpu_dm_connector(new_con_state->base.connector);
8578         struct drm_display_mode *mode = &new_crtc_state->base.mode;
8579         int vrefresh = drm_mode_vrefresh(mode);
8580
8581         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8582                                         vrefresh >= aconnector->min_vfreq &&
8583                                         vrefresh <= aconnector->max_vfreq;
8584
8585         if (new_crtc_state->vrr_supported) {
8586                 new_crtc_state->stream->ignore_msa_timing_param = true;
8587                 config.state = new_crtc_state->base.vrr_enabled ?
8588                                 VRR_STATE_ACTIVE_VARIABLE :
8589                                 VRR_STATE_INACTIVE;
8590                 config.min_refresh_in_uhz =
8591                                 aconnector->min_vfreq * 1000000;
8592                 config.max_refresh_in_uhz =
8593                                 aconnector->max_vfreq * 1000000;
8594                 config.vsif_supported = true;
8595                 config.btr = true;
8596         }
8597
8598         new_crtc_state->freesync_config = config;
8599 }
8600
8601 static void reset_freesync_config_for_crtc(
8602         struct dm_crtc_state *new_crtc_state)
8603 {
8604         new_crtc_state->vrr_supported = false;
8605
8606         memset(&new_crtc_state->vrr_infopacket, 0,
8607                sizeof(new_crtc_state->vrr_infopacket));
8608 }
8609
/*
 * dm_update_crtc_state() - Add/remove the dc stream backing a CRTC during
 * atomic check, and refresh stream settings that don't require a modeset.
 *
 * @dm: Display manager owning the dc instance.
 * @state: Global atomic state being checked.
 * @crtc: The DRM CRTC handled by this call.
 * @old_crtc_state: The CRTC state being replaced.
 * @new_crtc_state: The CRTC state being checked.
 * @enable: true on the stream-add pass, false on the stream-remove pass.
 * @lock_and_validation_needed: Set to true when a stream was added to or
 *	removed from the dm private dc context, which forces full dc
 *	validation later in atomic check.
 *
 * On the enable pass a new dc stream is created and validated against the
 * first connector targeting @crtc and, when a modeset is required, added to
 * the dc context. On the disable pass the old stream is removed from the
 * context. For enabled+active CRTCs, scaling/underscan, ABM, color
 * management and FreeSync settings are updated on the new state even when
 * no modeset is needed.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	/* May be NULL when no connector in @state targets this CRTC. */
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
							    &aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
							    &aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * we can have no stream on ACTION_SET if a display
		 * was disconnected during S3, in this case it is not an
		 * error, the OS will be updated after detection, and
		 * will do the right thing on next atomic commit
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					__func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		/*
		 * TODO: Check VSDB bits to decide whether this should
		 * be enabled or not.
		 */
		new_stream->triggered_crtc_reset.enabled =
			dm->force_timing_sync;

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		/* The dc context changed: force full dc validation later. */
		*lock_and_validation_needed = true;

	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent NULL pointer on new_stream when
		 * added MST connectors not found in existing crtc_state in the chained mode
		 * TODO: need to dig out the root cause of that
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
						crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			/* The dc context changed: force full dc validation later. */
			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		 dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->active))
		return 0;
	/*
	 * Given above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}
8845
8846 static bool should_reset_plane(struct drm_atomic_state *state,
8847                                struct drm_plane *plane,
8848                                struct drm_plane_state *old_plane_state,
8849                                struct drm_plane_state *new_plane_state)
8850 {
8851         struct drm_plane *other;
8852         struct drm_plane_state *old_other_state, *new_other_state;
8853         struct drm_crtc_state *new_crtc_state;
8854         int i;
8855
8856         /*
8857          * TODO: Remove this hack once the checks below are sufficient
8858          * enough to determine when we need to reset all the planes on
8859          * the stream.
8860          */
8861         if (state->allow_modeset)
8862                 return true;
8863
8864         /* Exit early if we know that we're adding or removing the plane. */
8865         if (old_plane_state->crtc != new_plane_state->crtc)
8866                 return true;
8867
8868         /* old crtc == new_crtc == NULL, plane not in context. */
8869         if (!new_plane_state->crtc)
8870                 return false;
8871
8872         new_crtc_state =
8873                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8874
8875         if (!new_crtc_state)
8876                 return true;
8877
8878         /* CRTC Degamma changes currently require us to recreate planes. */
8879         if (new_crtc_state->color_mgmt_changed)
8880                 return true;
8881
8882         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8883                 return true;
8884
8885         /*
8886          * If there are any new primary or overlay planes being added or
8887          * removed then the z-order can potentially change. To ensure
8888          * correct z-order and pipe acquisition the current DC architecture
8889          * requires us to remove and recreate all existing planes.
8890          *
8891          * TODO: Come up with a more elegant solution for this.
8892          */
8893         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8894                 struct amdgpu_framebuffer *old_afb, *new_afb;
8895                 if (other->type == DRM_PLANE_TYPE_CURSOR)
8896                         continue;
8897
8898                 if (old_other_state->crtc != new_plane_state->crtc &&
8899                     new_other_state->crtc != new_plane_state->crtc)
8900                         continue;
8901
8902                 if (old_other_state->crtc != new_other_state->crtc)
8903                         return true;
8904
8905                 /* Src/dst size and scaling updates. */
8906                 if (old_other_state->src_w != new_other_state->src_w ||
8907                     old_other_state->src_h != new_other_state->src_h ||
8908                     old_other_state->crtc_w != new_other_state->crtc_w ||
8909                     old_other_state->crtc_h != new_other_state->crtc_h)
8910                         return true;
8911
8912                 /* Rotation / mirroring updates. */
8913                 if (old_other_state->rotation != new_other_state->rotation)
8914                         return true;
8915
8916                 /* Blending updates. */
8917                 if (old_other_state->pixel_blend_mode !=
8918                     new_other_state->pixel_blend_mode)
8919                         return true;
8920
8921                 /* Alpha updates. */
8922                 if (old_other_state->alpha != new_other_state->alpha)
8923                         return true;
8924
8925                 /* Colorspace changes. */
8926                 if (old_other_state->color_range != new_other_state->color_range ||
8927                     old_other_state->color_encoding != new_other_state->color_encoding)
8928                         return true;
8929
8930                 /* Framebuffer checks fall at the end. */
8931                 if (!old_other_state->fb || !new_other_state->fb)
8932                         continue;
8933
8934                 /* Pixel format changes can require bandwidth updates. */
8935                 if (old_other_state->fb->format != new_other_state->fb->format)
8936                         return true;
8937
8938                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8939                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8940
8941                 /* Tiling and DCC changes also require bandwidth updates. */
8942                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
8943                     old_afb->base.modifier != new_afb->base.modifier)
8944                         return true;
8945         }
8946
8947         return false;
8948 }
8949
8950 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
8951                               struct drm_plane_state *new_plane_state,
8952                               struct drm_framebuffer *fb)
8953 {
8954         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
8955         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
8956         unsigned int pitch;
8957         bool linear;
8958
8959         if (fb->width > new_acrtc->max_cursor_width ||
8960             fb->height > new_acrtc->max_cursor_height) {
8961                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
8962                                  new_plane_state->fb->width,
8963                                  new_plane_state->fb->height);
8964                 return -EINVAL;
8965         }
8966         if (new_plane_state->src_w != fb->width << 16 ||
8967             new_plane_state->src_h != fb->height << 16) {
8968                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
8969                 return -EINVAL;
8970         }
8971
8972         /* Pitch in pixels */
8973         pitch = fb->pitches[0] / fb->format->cpp[0];
8974
8975         if (fb->width != pitch) {
8976                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
8977                                  fb->width, pitch);
8978                 return -EINVAL;
8979         }
8980
8981         switch (pitch) {
8982         case 64:
8983         case 128:
8984         case 256:
8985                 /* FB pitch is supported by cursor plane */
8986                 break;
8987         default:
8988                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
8989                 return -EINVAL;
8990         }
8991
8992         /* Core DRM takes care of checking FB modifiers, so we only need to
8993          * check tiling flags when the FB doesn't have a modifier. */
8994         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
8995                 if (adev->family < AMDGPU_FAMILY_AI) {
8996                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
8997                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
8998                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
8999                 } else {
9000                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9001                 }
9002                 if (!linear) {
9003                         DRM_DEBUG_ATOMIC("Cursor FB not linear");
9004                         return -EINVAL;
9005                 }
9006         }
9007
9008         return 0;
9009 }
9010
/*
 * Update the DC plane state for one DRM plane as part of atomic check.
 *
 * Called twice per plane from amdgpu_dm_atomic_check(): once with
 * @enable == false to remove changed/removed planes from the DC context,
 * and once with @enable == true to (re)add new/modified planes.
 *
 * @dc: DC core instance.
 * @state: the overall DRM atomic state being checked.
 * @plane: the DRM plane being updated.
 * @old_plane_state / @new_plane_state: the plane's old/new DRM state.
 * @enable: false = removal pass, true = addition pass.
 * @lock_and_validation_needed: set to true when the DC context was touched,
 *	forcing global lock + full DC validation later in atomic check.
 *
 * Returns 0 on success or a negative error code.
 */
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{

	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;


	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	/* Cursor planes are not backed by a DC plane state; only validate
	 * position/size/FB constraints and return without touching the
	 * DC context.
	 */
	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
			drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		/* Cursor hardware cannot crop: source offset must be 0. */
		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
			return -EINVAL;
		}

		if (new_plane_state->fb) {
			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
						 new_plane_state->fb);
			if (ret)
				return ret;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* Nothing to remove if the old CRTC had no DC stream. */
		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				plane->base.id, old_plane_crtc->base.id);

		/* Lazily acquire the DM private (DC context) state. */
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {

			return -EINVAL;
		}


		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		/* Can't attach a plane to a CRTC without a DC stream. */
		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		/* The removal pass should have cleared any previous DC state. */
		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {

			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/* Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}


	return ret;
}
9174
9175 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9176                                 struct drm_crtc *crtc,
9177                                 struct drm_crtc_state *new_crtc_state)
9178 {
9179         struct drm_plane_state *new_cursor_state, *new_primary_state;
9180         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9181
9182         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9183          * cursor per pipe but it's going to inherit the scaling and
9184          * positioning from the underlying pipe. Check the cursor plane's
9185          * blending properties match the primary plane's. */
9186
9187         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9188         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9189         if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9190                 return 0;
9191         }
9192
9193         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9194                          (new_cursor_state->src_w >> 16);
9195         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9196                          (new_cursor_state->src_h >> 16);
9197
9198         primary_scale_w = new_primary_state->crtc_w * 1000 /
9199                          (new_primary_state->src_w >> 16);
9200         primary_scale_h = new_primary_state->crtc_h * 1000 /
9201                          (new_primary_state->src_h >> 16);
9202
9203         if (cursor_scale_w != primary_scale_w ||
9204             cursor_scale_h != primary_scale_h) {
9205                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9206                 return -EINVAL;
9207         }
9208
9209         return 0;
9210 }
9211
9212 #if defined(CONFIG_DRM_AMD_DC_DCN)
9213 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9214 {
9215         struct drm_connector *connector;
9216         struct drm_connector_state *conn_state;
9217         struct amdgpu_dm_connector *aconnector = NULL;
9218         int i;
9219         for_each_new_connector_in_state(state, connector, conn_state, i) {
9220                 if (conn_state->crtc != crtc)
9221                         continue;
9222
9223                 aconnector = to_amdgpu_dm_connector(connector);
9224                 if (!aconnector->port || !aconnector->mst_port)
9225                         aconnector = NULL;
9226                 else
9227                         break;
9228         }
9229
9230         if (!aconnector)
9231                 return 0;
9232
9233         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9234 }
9235 #endif
9236
9237 /**
9238  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9239  * @dev: The DRM device
9240  * @state: The atomic state to commit
9241  *
9242  * Validate that the given atomic state is programmable by DC into hardware.
9243  * This involves constructing a &struct dc_state reflecting the new hardware
9244  * state we wish to commit, then querying DC to see if it is programmable. It's
9245  * important not to modify the existing DC state. Otherwise, atomic_check
9246  * may unexpectedly commit hardware changes.
9247  *
9248  * When validating the DC state, it's important that the right locks are
9249  * acquired. For full updates case which removes/adds/updates streams on one
9250  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9251  * that any such full update commit will wait for completion of any outstanding
9252  * flip using DRMs synchronization events.
9253  *
9254  * Note that DM adds the affected connectors for all CRTCs in state, when that
9255  * might not seem necessary. This is because DC stream creation requires the
9256  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9257  * be possible but non-trivial - a possible TODO item.
9258  *
 * Return: 0 on success, or a negative error code if validation failed.
9260  */
9261 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9262                                   struct drm_atomic_state *state)
9263 {
9264         struct amdgpu_device *adev = drm_to_adev(dev);
9265         struct dm_atomic_state *dm_state = NULL;
9266         struct dc *dc = adev->dm.dc;
9267         struct drm_connector *connector;
9268         struct drm_connector_state *old_con_state, *new_con_state;
9269         struct drm_crtc *crtc;
9270         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9271         struct drm_plane *plane;
9272         struct drm_plane_state *old_plane_state, *new_plane_state;
9273         enum dc_status status;
9274         int ret, i;
9275         bool lock_and_validation_needed = false;
9276         struct dm_crtc_state *dm_old_crtc_state;
9277
9278         trace_amdgpu_dm_atomic_check_begin(state);
9279
9280         ret = drm_atomic_helper_check_modeset(dev, state);
9281         if (ret)
9282                 goto fail;
9283
9284         /* Check connector changes */
9285         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9286                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9287                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9288
9289                 /* Skip connectors that are disabled or part of modeset already. */
9290                 if (!old_con_state->crtc && !new_con_state->crtc)
9291                         continue;
9292
9293                 if (!new_con_state->crtc)
9294                         continue;
9295
9296                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9297                 if (IS_ERR(new_crtc_state)) {
9298                         ret = PTR_ERR(new_crtc_state);
9299                         goto fail;
9300                 }
9301
9302                 if (dm_old_con_state->abm_level !=
9303                     dm_new_con_state->abm_level)
9304                         new_crtc_state->connectors_changed = true;
9305         }
9306
9307 #if defined(CONFIG_DRM_AMD_DC_DCN)
9308         if (adev->asic_type >= CHIP_NAVI10) {
9309                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9310                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9311                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
9312                                 if (ret)
9313                                         goto fail;
9314                         }
9315                 }
9316         }
9317 #endif
9318         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9319                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9320
9321                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9322                     !new_crtc_state->color_mgmt_changed &&
9323                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9324                         dm_old_crtc_state->dsc_force_changed == false)
9325                         continue;
9326
9327                 if (!new_crtc_state->enable)
9328                         continue;
9329
9330                 ret = drm_atomic_add_affected_connectors(state, crtc);
9331                 if (ret)
9332                         return ret;
9333
9334                 ret = drm_atomic_add_affected_planes(state, crtc);
9335                 if (ret)
9336                         goto fail;
9337
9338                 if (dm_old_crtc_state->dsc_force_changed)
9339                         new_crtc_state->mode_changed = true;
9340         }
9341
9342         /*
9343          * Add all primary and overlay planes on the CRTC to the state
9344          * whenever a plane is enabled to maintain correct z-ordering
9345          * and to enable fast surface updates.
9346          */
9347         drm_for_each_crtc(crtc, dev) {
9348                 bool modified = false;
9349
9350                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9351                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9352                                 continue;
9353
9354                         if (new_plane_state->crtc == crtc ||
9355                             old_plane_state->crtc == crtc) {
9356                                 modified = true;
9357                                 break;
9358                         }
9359                 }
9360
9361                 if (!modified)
9362                         continue;
9363
9364                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9365                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9366                                 continue;
9367
9368                         new_plane_state =
9369                                 drm_atomic_get_plane_state(state, plane);
9370
9371                         if (IS_ERR(new_plane_state)) {
9372                                 ret = PTR_ERR(new_plane_state);
9373                                 goto fail;
9374                         }
9375                 }
9376         }
9377
9378         /* Remove exiting planes if they are modified */
9379         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9380                 ret = dm_update_plane_state(dc, state, plane,
9381                                             old_plane_state,
9382                                             new_plane_state,
9383                                             false,
9384                                             &lock_and_validation_needed);
9385                 if (ret)
9386                         goto fail;
9387         }
9388
9389         /* Disable all crtcs which require disable */
9390         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9391                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9392                                            old_crtc_state,
9393                                            new_crtc_state,
9394                                            false,
9395                                            &lock_and_validation_needed);
9396                 if (ret)
9397                         goto fail;
9398         }
9399
9400         /* Enable all crtcs which require enable */
9401         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9402                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9403                                            old_crtc_state,
9404                                            new_crtc_state,
9405                                            true,
9406                                            &lock_and_validation_needed);
9407                 if (ret)
9408                         goto fail;
9409         }
9410
9411         /* Add new/modified planes */
9412         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9413                 ret = dm_update_plane_state(dc, state, plane,
9414                                             old_plane_state,
9415                                             new_plane_state,
9416                                             true,
9417                                             &lock_and_validation_needed);
9418                 if (ret)
9419                         goto fail;
9420         }
9421
9422         /* Run this here since we want to validate the streams we created */
9423         ret = drm_atomic_helper_check_planes(dev, state);
9424         if (ret)
9425                 goto fail;
9426
9427         /* Check cursor planes scaling */
9428         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9429                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9430                 if (ret)
9431                         goto fail;
9432         }
9433
9434         if (state->legacy_cursor_update) {
9435                 /*
9436                  * This is a fast cursor update coming from the plane update
9437                  * helper, check if it can be done asynchronously for better
9438                  * performance.
9439                  */
9440                 state->async_update =
9441                         !drm_atomic_helper_async_check(dev, state);
9442
9443                 /*
9444                  * Skip the remaining global validation if this is an async
9445                  * update. Cursor updates can be done without affecting
9446                  * state or bandwidth calcs and this avoids the performance
9447                  * penalty of locking the private state object and
9448                  * allocating a new dc_state.
9449                  */
9450                 if (state->async_update)
9451                         return 0;
9452         }
9453
9454         /* Check scaling and underscan changes*/
9455         /* TODO Removed scaling changes validation due to inability to commit
9456          * new stream into context w\o causing full reset. Need to
9457          * decide how to handle.
9458          */
9459         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9460                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9461                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9462                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9463
9464                 /* Skip any modesets/resets */
9465                 if (!acrtc || drm_atomic_crtc_needs_modeset(
9466                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9467                         continue;
9468
9469                 /* Skip any thing not scale or underscan changes */
9470                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9471                         continue;
9472
9473                 lock_and_validation_needed = true;
9474         }
9475
9476         /**
9477          * Streams and planes are reset when there are changes that affect
9478          * bandwidth. Anything that affects bandwidth needs to go through
9479          * DC global validation to ensure that the configuration can be applied
9480          * to hardware.
9481          *
9482          * We have to currently stall out here in atomic_check for outstanding
9483          * commits to finish in this case because our IRQ handlers reference
9484          * DRM state directly - we can end up disabling interrupts too early
9485          * if we don't.
9486          *
9487          * TODO: Remove this stall and drop DM state private objects.
9488          */
9489         if (lock_and_validation_needed) {
9490                 ret = dm_atomic_get_state(state, &dm_state);
9491                 if (ret)
9492                         goto fail;
9493
9494                 ret = do_aquire_global_lock(dev, state);
9495                 if (ret)
9496                         goto fail;
9497
9498 #if defined(CONFIG_DRM_AMD_DC_DCN)
9499                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9500                         goto fail;
9501
9502                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9503                 if (ret)
9504                         goto fail;
9505 #endif
9506
9507                 /*
9508                  * Perform validation of MST topology in the state:
9509                  * We need to perform MST atomic check before calling
9510                  * dc_validate_global_state(), or there is a chance
9511                  * to get stuck in an infinite loop and hang eventually.
9512                  */
9513                 ret = drm_dp_mst_atomic_check(state);
9514                 if (ret)
9515                         goto fail;
9516                 status = dc_validate_global_state(dc, dm_state->context, false);
9517                 if (status != DC_OK) {
9518                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
9519                                        dc_status_to_str(status), status);
9520                         ret = -EINVAL;
9521                         goto fail;
9522                 }
9523         } else {
9524                 /*
9525                  * The commit is a fast update. Fast updates shouldn't change
9526                  * the DC context, affect global validation, and can have their
9527                  * commit work done in parallel with other commits not touching
9528                  * the same resource. If we have a new DC context as part of
9529                  * the DM atomic state from validation we need to free it and
9530                  * retain the existing one instead.
9531                  *
9532                  * Furthermore, since the DM atomic state only contains the DC
9533                  * context and can safely be annulled, we can free the state
9534                  * and clear the associated private object now to free
9535                  * some memory and avoid a possible use-after-free later.
9536                  */
9537
9538                 for (i = 0; i < state->num_private_objs; i++) {
9539                         struct drm_private_obj *obj = state->private_objs[i].ptr;
9540
9541                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
9542                                 int j = state->num_private_objs-1;
9543
9544                                 dm_atomic_destroy_state(obj,
9545                                                 state->private_objs[i].state);
9546
9547                                 /* If i is not at the end of the array then the
9548                                  * last element needs to be moved to where i was
9549                                  * before the array can safely be truncated.
9550                                  */
9551                                 if (i != j)
9552                                         state->private_objs[i] =
9553                                                 state->private_objs[j];
9554
9555                                 state->private_objs[j].ptr = NULL;
9556                                 state->private_objs[j].state = NULL;
9557                                 state->private_objs[j].old_state = NULL;
9558                                 state->private_objs[j].new_state = NULL;
9559
9560                                 state->num_private_objs = j;
9561                                 break;
9562                         }
9563                 }
9564         }
9565
9566         /* Store the overall update type for use later in atomic check. */
9567         for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9568                 struct dm_crtc_state *dm_new_crtc_state =
9569                         to_dm_crtc_state(new_crtc_state);
9570
9571                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9572                                                          UPDATE_TYPE_FULL :
9573                                                          UPDATE_TYPE_FAST;
9574         }
9575
9576         /* Must be success */
9577         WARN_ON(ret);
9578
9579         trace_amdgpu_dm_atomic_check_finish(state, ret);
9580
9581         return ret;
9582
9583 fail:
9584         if (ret == -EDEADLK)
9585                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9586         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9587                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9588         else
9589                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9590
9591         trace_amdgpu_dm_atomic_check_finish(state, ret);
9592
9593         return ret;
9594 }
9595
9596 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9597                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
9598 {
9599         uint8_t dpcd_data;
9600         bool capable = false;
9601
9602         if (amdgpu_dm_connector->dc_link &&
9603                 dm_helpers_dp_read_dpcd(
9604                                 NULL,
9605                                 amdgpu_dm_connector->dc_link,
9606                                 DP_DOWN_STREAM_PORT_COUNT,
9607                                 &dpcd_data,
9608                                 sizeof(dpcd_data))) {
9609                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
9610         }
9611
9612         return capable;
9613 }
9614 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9615                                         struct edid *edid)
9616 {
9617         int i;
9618         bool edid_check_required;
9619         struct detailed_timing *timing;
9620         struct detailed_non_pixel *data;
9621         struct detailed_data_monitor_range *range;
9622         struct amdgpu_dm_connector *amdgpu_dm_connector =
9623                         to_amdgpu_dm_connector(connector);
9624         struct dm_connector_state *dm_con_state = NULL;
9625
9626         struct drm_device *dev = connector->dev;
9627         struct amdgpu_device *adev = drm_to_adev(dev);
9628         bool freesync_capable = false;
9629
9630         if (!connector->state) {
9631                 DRM_ERROR("%s - Connector has no state", __func__);
9632                 goto update;
9633         }
9634
9635         if (!edid) {
9636                 dm_con_state = to_dm_connector_state(connector->state);
9637
9638                 amdgpu_dm_connector->min_vfreq = 0;
9639                 amdgpu_dm_connector->max_vfreq = 0;
9640                 amdgpu_dm_connector->pixel_clock_mhz = 0;
9641
9642                 goto update;
9643         }
9644
9645         dm_con_state = to_dm_connector_state(connector->state);
9646
9647         edid_check_required = false;
9648         if (!amdgpu_dm_connector->dc_sink) {
9649                 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9650                 goto update;
9651         }
9652         if (!adev->dm.freesync_module)
9653                 goto update;
9654         /*
9655          * if edid non zero restrict freesync only for dp and edp
9656          */
9657         if (edid) {
9658                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9659                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9660                         edid_check_required = is_dp_capable_without_timing_msa(
9661                                                 adev->dm.dc,
9662                                                 amdgpu_dm_connector);
9663                 }
9664         }
9665         if (edid_check_required == true && (edid->version > 1 ||
9666            (edid->version == 1 && edid->revision > 1))) {
9667                 for (i = 0; i < 4; i++) {
9668
9669                         timing  = &edid->detailed_timings[i];
9670                         data    = &timing->data.other_data;
9671                         range   = &data->data.range;
9672                         /*
9673                          * Check if monitor has continuous frequency mode
9674                          */
9675                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
9676                                 continue;
9677                         /*
9678                          * Check for flag range limits only. If flag == 1 then
9679                          * no additional timing information provided.
9680                          * Default GTF, GTF Secondary curve and CVT are not
9681                          * supported
9682                          */
9683                         if (range->flags != 1)
9684                                 continue;
9685
9686                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9687                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9688                         amdgpu_dm_connector->pixel_clock_mhz =
9689                                 range->pixel_clock_mhz * 10;
9690
9691                         connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
9692                         connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
9693
9694                         break;
9695                 }
9696
9697                 if (amdgpu_dm_connector->max_vfreq -
9698                     amdgpu_dm_connector->min_vfreq > 10) {
9699
9700                         freesync_capable = true;
9701                 }
9702         }
9703
9704 update:
9705         if (dm_con_state)
9706                 dm_con_state->freesync_capable = freesync_capable;
9707
9708         if (connector->vrr_capable_property)
9709                 drm_connector_set_vrr_capable_property(connector,
9710                                                        freesync_capable);
9711 }
9712
9713 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9714 {
9715         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9716
9717         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9718                 return;
9719         if (link->type == dc_connection_none)
9720                 return;
9721         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9722                                         dpcd_data, sizeof(dpcd_data))) {
9723                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9724
9725                 if (dpcd_data[0] == 0) {
9726                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9727                         link->psr_settings.psr_feature_enabled = false;
9728                 } else {
9729                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
9730                         link->psr_settings.psr_feature_enabled = true;
9731                 }
9732
9733                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9734         }
9735 }
9736
9737 /*
9738  * amdgpu_dm_link_setup_psr() - configure psr link
9739  * @stream: stream state
9740  *
9741  * Return: true if success
9742  */
9743 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9744 {
9745         struct dc_link *link = NULL;
9746         struct psr_config psr_config = {0};
9747         struct psr_context psr_context = {0};
9748         bool ret = false;
9749
9750         if (stream == NULL)
9751                 return false;
9752
9753         link = stream->link;
9754
9755         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9756
9757         if (psr_config.psr_version > 0) {
9758                 psr_config.psr_exit_link_training_required = 0x1;
9759                 psr_config.psr_frame_capture_indication_req = 0;
9760                 psr_config.psr_rfb_setup_time = 0x37;
9761                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9762                 psr_config.allow_smu_optimizations = 0x0;
9763
9764                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9765
9766         }
9767         DRM_DEBUG_DRIVER("PSR link: %d\n",      link->psr_settings.psr_feature_enabled);
9768
9769         return ret;
9770 }
9771
9772 /*
9773  * amdgpu_dm_psr_enable() - enable psr f/w
9774  * @stream: stream state
9775  *
9776  * Return: true if success
9777  */
9778 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9779 {
9780         struct dc_link *link = stream->link;
9781         unsigned int vsync_rate_hz = 0;
9782         struct dc_static_screen_params params = {0};
9783         /* Calculate number of static frames before generating interrupt to
9784          * enter PSR.
9785          */
9786         // Init fail safe of 2 frames static
9787         unsigned int num_frames_static = 2;
9788
9789         DRM_DEBUG_DRIVER("Enabling psr...\n");
9790
9791         vsync_rate_hz = div64_u64(div64_u64((
9792                         stream->timing.pix_clk_100hz * 100),
9793                         stream->timing.v_total),
9794                         stream->timing.h_total);
9795
9796         /* Round up
9797          * Calculate number of frames such that at least 30 ms of time has
9798          * passed.
9799          */
9800         if (vsync_rate_hz != 0) {
9801                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9802                 num_frames_static = (30000 / frame_time_microsec) + 1;
9803         }
9804
9805         params.triggers.cursor_update = true;
9806         params.triggers.overlay_update = true;
9807         params.triggers.surface_update = true;
9808         params.num_frames = num_frames_static;
9809
9810         dc_stream_set_static_screen_params(link->ctx->dc,
9811                                            &stream, 1,
9812                                            &params);
9813
9814         return dc_link_set_psr_allow_active(link, true, false, false);
9815 }
9816
9817 /*
9818  * amdgpu_dm_psr_disable() - disable psr f/w
9819  * @stream:  stream state
9820  *
9821  * Return: true if success
9822  */
9823 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9824 {
9825
9826         DRM_DEBUG_DRIVER("Disabling psr...\n");
9827
9828         return dc_link_set_psr_allow_active(stream->link, false, true, false);
9829 }
9830
9831 /*
9832  * amdgpu_dm_psr_disable() - disable psr f/w
9833  * if psr is enabled on any stream
9834  *
9835  * Return: true if success
9836  */
9837 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9838 {
9839         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9840         return dc_set_psr_allow_active(dm->dc, false);
9841 }
9842
9843 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9844 {
9845         struct amdgpu_device *adev = drm_to_adev(dev);
9846         struct dc *dc = adev->dm.dc;
9847         int i;
9848
9849         mutex_lock(&adev->dm.dc_lock);
9850         if (dc->current_state) {
9851                 for (i = 0; i < dc->current_state->stream_count; ++i)
9852                         dc->current_state->streams[i]
9853                                 ->triggered_crtc_reset.enabled =
9854                                 adev->dm.force_timing_sync;
9855
9856                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9857                 dc_trigger_sync(dc, dc->current_state);
9858         }
9859         mutex_unlock(&adev->dm.dc_lock);
9860 }
9861
9862 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9863                        uint32_t value, const char *func_name)
9864 {
9865 #ifdef DM_CHECK_ADDR_0
9866         if (address == 0) {
9867                 DC_ERR("invalid register write. address = 0");
9868                 return;
9869         }
9870 #endif
9871         cgs_write_register(ctx->cgs_device, address, value);
9872         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9873 }
9874
9875 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9876                           const char *func_name)
9877 {
9878         uint32_t value;
9879 #ifdef DM_CHECK_ADDR_0
9880         if (address == 0) {
9881                 DC_ERR("invalid register read; address = 0\n");
9882                 return 0;
9883         }
9884 #endif
9885
9886         if (ctx->dmub_srv &&
9887             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9888             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9889                 ASSERT(false);
9890                 return 0;
9891         }
9892
9893         value = cgs_read_register(ctx->cgs_device, address);
9894
9895         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9896
9897         return value;
9898 }