drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "amdgpu_dm_trace.h"
38
39 #include "vid.h"
40 #include "amdgpu.h"
41 #include "amdgpu_display.h"
42 #include "amdgpu_ucode.h"
43 #include "atom.h"
44 #include "amdgpu_dm.h"
45 #ifdef CONFIG_DRM_AMD_DC_HDCP
46 #include "amdgpu_dm_hdcp.h"
47 #include <drm/drm_hdcp.h>
48 #endif
49 #include "amdgpu_pm.h"
50
51 #include "amd_shared.h"
52 #include "amdgpu_dm_irq.h"
53 #include "dm_helpers.h"
54 #include "amdgpu_dm_mst_types.h"
55 #if defined(CONFIG_DEBUG_FS)
56 #include "amdgpu_dm_debugfs.h"
57 #endif
58
59 #include "ivsrcid/ivsrcid_vislands30.h"
60
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/version.h>
64 #include <linux/types.h>
65 #include <linux/pm_runtime.h>
66 #include <linux/pci.h>
67 #include <linux/firmware.h>
68 #include <linux/component.h>
69
70 #include <drm/drm_atomic.h>
71 #include <drm/drm_atomic_uapi.h>
72 #include <drm/drm_atomic_helper.h>
73 #include <drm/drm_dp_mst_helper.h>
74 #include <drm/drm_fb_helper.h>
75 #include <drm/drm_fourcc.h>
76 #include <drm/drm_edid.h>
77 #include <drm/drm_vblank.h>
78 #include <drm/drm_audio_component.h>
79 #include <drm/drm_hdcp.h>
80
81 #if defined(CONFIG_DRM_AMD_DC_DCN)
82 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83
84 #include "dcn/dcn_1_0_offset.h"
85 #include "dcn/dcn_1_0_sh_mask.h"
86 #include "soc15_hw_ip.h"
87 #include "vega10_ip_offset.h"
88
89 #include "soc15_common.h"
90 #endif
91
92 #include "modules/inc/mod_freesync.h"
93 #include "modules/power/power_helpers.h"
94 #include "modules/inc/mod_info_packet.h"
95
96 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
97 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
104 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
106 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
108
109 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
110 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
111
112 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
113 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
114
115 /* Number of bytes in PSP header for firmware. */
116 #define PSP_HEADER_BYTES 0x100
117
118 /* Number of bytes in PSP footer for firmware. */
119 #define PSP_FOOTER_BYTES 0x100
120
121 /**
122  * DOC: overview
123  *
124  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
126  * requests into DC requests, and DC responses into DRM responses.
127  *
128  * The root control structure is &struct amdgpu_display_manager.
129  */
130
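/*
 * Illustrative flow (a sketch, not exhaustive): a DRM atomic commit enters
 * through amdgpu_dm_atomic_check() and amdgpu_dm_atomic_commit_tail() below,
 * which translate the requested DRM state into DC objects such as
 * dc_stream_state and dc_plane_state before handing them to the dc_* API.
 */
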
131 /* basic init/fini API */
132 static int amdgpu_dm_init(struct amdgpu_device *adev);
133 static void amdgpu_dm_fini(struct amdgpu_device *adev);
134
135 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
136 {
137         switch (link->dpcd_caps.dongle_type) {
138         case DISPLAY_DONGLE_NONE:
139                 return DRM_MODE_SUBCONNECTOR_Native;
140         case DISPLAY_DONGLE_DP_VGA_CONVERTER:
141                 return DRM_MODE_SUBCONNECTOR_VGA;
142         case DISPLAY_DONGLE_DP_DVI_CONVERTER:
143         case DISPLAY_DONGLE_DP_DVI_DONGLE:
144                 return DRM_MODE_SUBCONNECTOR_DVID;
145         case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
146         case DISPLAY_DONGLE_DP_HDMI_DONGLE:
147                 return DRM_MODE_SUBCONNECTOR_HDMIA;
148         case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
149         default:
150                 return DRM_MODE_SUBCONNECTOR_Unknown;
151         }
152 }
153
154 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
155 {
156         struct dc_link *link = aconnector->dc_link;
157         struct drm_connector *connector = &aconnector->base;
158         enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
159
160         if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
161                 return;
162
163         if (aconnector->dc_sink)
164                 subconnector = get_subconnector_type(link);
165
166         drm_object_property_set_value(&connector->base,
167                         connector->dev->mode_config.dp_subconnector_property,
168                         subconnector);
169 }
170
171 /*
172  * Initializes drm_device display-related structures, based on the information
173  * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
174  * drm_encoder and drm_mode_config.
175  *
176  * Returns 0 on success
177  */
178 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
179 /* removes and deallocates the drm structures, created by the above function */
180 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
181
182 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
183                                 struct drm_plane *plane,
184                                 unsigned long possible_crtcs,
185                                 const struct dc_plane_cap *plane_cap);
186 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
187                                struct drm_plane *plane,
188                                uint32_t link_index);
189 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
190                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
191                                     uint32_t link_index,
192                                     struct amdgpu_encoder *amdgpu_encoder);
193 static int amdgpu_dm_encoder_init(struct drm_device *dev,
194                                   struct amdgpu_encoder *aencoder,
195                                   uint32_t link_index);
196
197 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
198
199 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
200
201 static int amdgpu_dm_atomic_check(struct drm_device *dev,
202                                   struct drm_atomic_state *state);
203
204 static void handle_cursor_update(struct drm_plane *plane,
205                                  struct drm_plane_state *old_plane_state);
206
207 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
208 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
209 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
211 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
212
213 static const struct drm_format_info *
214 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
215
216 /*
217  * dm_vblank_get_counter
218  *
219  * @brief
220  * Get counter for number of vertical blanks
221  *
222  * @param
223  * struct amdgpu_device *adev - [in] desired amdgpu device
224  * int crtc - [in] index of the CRTC to get the counter from
225  *
226  * @return
227  * Counter for vertical blanks
228  */
229 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
230 {
231         struct amdgpu_crtc *acrtc;
232
233         if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
234                 return 0;
235
236         acrtc = adev->mode_info.crtcs[crtc];
237         if (acrtc->dm_irq_params.stream == NULL) {
238                 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
239                           crtc);
240                 return 0;
241         }
242
243         return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
244 }
245
246 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
247                                   u32 *vbl, u32 *position)
248 {
249         uint32_t v_blank_start, v_blank_end, h_position, v_position;
250         struct amdgpu_crtc *acrtc;
251
252         if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
253                 return -EINVAL;
254
255         acrtc = adev->mode_info.crtcs[crtc];
256
257         if (acrtc->dm_irq_params.stream == NULL) {
258                 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
259                           crtc);
260                 return 0;
261         }
262
263         /*
264          * TODO: rework the base driver to use these values directly;
265          * for now, parse them back into register format.
266          */
267         dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
268                                  &v_blank_start,
269                                  &v_blank_end,
270                                  &h_position,
271                                  &v_position);
272
273         *position = v_position | (h_position << 16);
274         *vbl = v_blank_start | (v_blank_end << 16);
275
276         return 0;
277 }
278
279 static bool dm_is_idle(void *handle)
280 {
281         /* XXX todo */
282         return true;
283 }
284
285 static int dm_wait_for_idle(void *handle)
286 {
287         /* XXX todo */
288         return 0;
289 }
290
291 static bool dm_check_soft_reset(void *handle)
292 {
293         return false;
294 }
295
296 static int dm_soft_reset(void *handle)
297 {
298         /* XXX todo */
299         return 0;
300 }
301
302 static struct amdgpu_crtc *
303 get_crtc_by_otg_inst(struct amdgpu_device *adev,
304                      int otg_inst)
305 {
306         struct drm_device *dev = adev_to_drm(adev);
307         struct drm_crtc *crtc;
308         struct amdgpu_crtc *amdgpu_crtc;
309
310         if (otg_inst == -1) {
311                 WARN_ON(1);
312                 return adev->mode_info.crtcs[0];
313         }
314
315         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
316                 amdgpu_crtc = to_amdgpu_crtc(crtc);
317
318                 if (amdgpu_crtc->otg_inst == otg_inst)
319                         return amdgpu_crtc;
320         }
321
322         return NULL;
323 }
324
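/*
 * VRR helpers: the _irq variant reads the freesync state cached in
 * dm_irq_params, so it can be used from interrupt handlers; the plain
 * variant reads the same state from the atomic dm_crtc_state instead.
 */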
325 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
326 {
327         return acrtc->dm_irq_params.freesync_config.state ==
328                        VRR_STATE_ACTIVE_VARIABLE ||
329                acrtc->dm_irq_params.freesync_config.state ==
330                        VRR_STATE_ACTIVE_FIXED;
331 }
332
333 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
334 {
335         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
336                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
337 }
338
339 /**
340  * dm_pflip_high_irq() - Handle pageflip interrupt
341  * @interrupt_params: interrupt parameters, carrying the device and IRQ source
342  *
343  * Handles the pageflip interrupt by notifying all interested parties
344  * that the pageflip has been completed.
345  */
346 static void dm_pflip_high_irq(void *interrupt_params)
347 {
348         struct amdgpu_crtc *amdgpu_crtc;
349         struct common_irq_params *irq_params = interrupt_params;
350         struct amdgpu_device *adev = irq_params->adev;
351         unsigned long flags;
352         struct drm_pending_vblank_event *e;
353         uint32_t vpos, hpos, v_blank_start, v_blank_end;
354         bool vrr_active;
355
356         amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
357
358         /* IRQ could occur when in initial stage */
359         /* TODO work and BO cleanup */
360         if (amdgpu_crtc == NULL) {
361                 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
362                 return;
363         }
364
365         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
366
367         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
368                 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
369                                                  amdgpu_crtc->pflip_status,
370                                                  AMDGPU_FLIP_SUBMITTED,
371                                                  amdgpu_crtc->crtc_id,
372                                                  amdgpu_crtc);
373                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
374                 return;
375         }
376
377         /* page flip completed. */
378         e = amdgpu_crtc->event;
379         amdgpu_crtc->event = NULL;
380
381         if (!e)
382                 WARN_ON(1);
383
384         vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
385
386         /* Fixed refresh rate, or VRR scanout position outside front-porch? */
387         if (!vrr_active ||
388             !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
389                                       &v_blank_end, &hpos, &vpos) ||
390             (vpos < v_blank_start)) {
391                 /* Update to correct count and vblank timestamp if racing with
392                  * vblank irq. This also updates to the correct vblank timestamp
393                  * even in VRR mode, as scanout is past the front-porch atm.
394                  */
395                 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
396
397                 /* Wake up userspace by sending the pageflip event with proper
398                  * count and timestamp of vblank of flip completion.
399                  */
400                 if (e) {
401                         drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
402
403                         /* Event sent, so done with vblank for this flip */
404                         drm_crtc_vblank_put(&amdgpu_crtc->base);
405                 }
406         } else if (e) {
407                 /* VRR active and inside front-porch: vblank count and
408                  * timestamp for pageflip event will only be up to date after
409                  * drm_crtc_handle_vblank() has been executed from late vblank
410                  * irq handler after start of back-porch (vline 0). We queue the
411                  * pageflip event for send-out by drm_crtc_handle_vblank() with
412                  * updated timestamp and count, once it runs after us.
413                  *
414                  * We need to open-code this instead of using the helper
415                  * drm_crtc_arm_vblank_event(), as that helper would
416                  * call drm_crtc_accurate_vblank_count(), which we must
417                  * not call in VRR mode while we are in front-porch!
418                  */
419
420                 /* sequence will be replaced by real count during send-out. */
421                 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
422                 e->pipe = amdgpu_crtc->crtc_id;
423
424                 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
425                 e = NULL;
426         }
427
428         /* Keep track of the vblank of this flip for flip throttling. We use
429          * the cooked hw counter, as it is incremented at the start of the
430          * vblank in which the pageflip completed, so last_flip_vblank is the
431          * forbidden count for queueing new pageflips if vsync + VRR is enabled.
432          */
433         amdgpu_crtc->dm_irq_params.last_flip_vblank =
434                 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
435
436         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
437         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
438
439         DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
440                          amdgpu_crtc->crtc_id, amdgpu_crtc,
441                          vrr_active, (int) !e);
442 }
443
444 static void dm_vupdate_high_irq(void *interrupt_params)
445 {
446         struct common_irq_params *irq_params = interrupt_params;
447         struct amdgpu_device *adev = irq_params->adev;
448         struct amdgpu_crtc *acrtc;
449         unsigned long flags;
450         int vrr_active;
451
452         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
453
454         if (acrtc) {
455                 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
456
457                 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
458                               acrtc->crtc_id,
459                               vrr_active);
460
461                 /* In VRR mode, core vblank handling is done here, after the
462                  * end of the front porch, as vblank timestamping only gives
463                  * valid results once scanout is past the front porch. This
464                  * also delivers page-flip completion events that were queued
465                  * to us if a pageflip happened inside the front porch.
466                  */
467                 if (vrr_active) {
468                         drm_crtc_handle_vblank(&acrtc->base);
469
470                         /* BTR processing for pre-DCE12 ASICs */
471                         if (acrtc->dm_irq_params.stream &&
472                             adev->family < AMDGPU_FAMILY_AI) {
473                                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
474                                 mod_freesync_handle_v_update(
475                                     adev->dm.freesync_module,
476                                     acrtc->dm_irq_params.stream,
477                                     &acrtc->dm_irq_params.vrr_params);
478
479                                 dc_stream_adjust_vmin_vmax(
480                                     adev->dm.dc,
481                                     acrtc->dm_irq_params.stream,
482                                     &acrtc->dm_irq_params.vrr_params.adjust);
483                                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
484                         }
485                 }
486         }
487 }
488
489 /**
490  * dm_crtc_high_irq() - Handles CRTC interrupt
491  * @interrupt_params: used for determining the CRTC instance
492  *
493  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
494  * event handler.
495  */
496 static void dm_crtc_high_irq(void *interrupt_params)
497 {
498         struct common_irq_params *irq_params = interrupt_params;
499         struct amdgpu_device *adev = irq_params->adev;
500         struct amdgpu_crtc *acrtc;
501         unsigned long flags;
502         int vrr_active;
503
504         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
505         if (!acrtc)
506                 return;
507
508         vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
509
510         DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
511                       vrr_active, acrtc->dm_irq_params.active_planes);
512
513         /*
514          * Core vblank handling at start of front-porch is only possible
515          * in non-vrr mode, as only there vblank timestamping will give
516          * valid results while done in front-porch. Otherwise defer it
517          * to dm_vupdate_high_irq after end of front-porch.
518          */
519         if (!vrr_active)
520                 drm_crtc_handle_vblank(&acrtc->base);
521
522         /*
523          * The following must happen at the start of vblank, for CRC
524          * computation and below-the-range (BTR) support in VRR mode.
525          */
526         amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
527
528         /* BTR updates need to happen before VUPDATE on Vega and above. */
529         if (adev->family < AMDGPU_FAMILY_AI)
530                 return;
531
532         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
533
534         if (acrtc->dm_irq_params.stream &&
535             acrtc->dm_irq_params.vrr_params.supported &&
536             acrtc->dm_irq_params.freesync_config.state ==
537                     VRR_STATE_ACTIVE_VARIABLE) {
538                 mod_freesync_handle_v_update(adev->dm.freesync_module,
539                                              acrtc->dm_irq_params.stream,
540                                              &acrtc->dm_irq_params.vrr_params);
541
542                 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
543                                            &acrtc->dm_irq_params.vrr_params.adjust);
544         }
545
546         /*
547          * If there aren't any active_planes then DCH HUBP may be clock-gated.
548          * In that case, pageflip completion interrupts won't fire and pageflip
549          * completion events won't get delivered. Prevent this by sending
550          * pending pageflip events from here if a flip is still pending.
551          *
552          * If any planes are enabled, use dm_pflip_high_irq() instead, to
553          * avoid race conditions between flip programming and completion,
554          * which could cause too early flip completion events.
555          */
556         if (adev->family >= AMDGPU_FAMILY_RV &&
557             acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
558             acrtc->dm_irq_params.active_planes == 0) {
559                 if (acrtc->event) {
560                         drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
561                         acrtc->event = NULL;
562                         drm_crtc_vblank_put(&acrtc->base);
563                 }
564                 acrtc->pflip_status = AMDGPU_FLIP_NONE;
565         }
566
567         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
568 }
569
570 static int dm_set_clockgating_state(void *handle,
571                   enum amd_clockgating_state state)
572 {
573         return 0;
574 }
575
576 static int dm_set_powergating_state(void *handle,
577                   enum amd_powergating_state state)
578 {
579         return 0;
580 }
581
582 /* Prototypes of private functions */
583 static int dm_early_init(void *handle);
584
585 /* Allocate memory for FBC compressed data */
586 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
587 {
588         struct drm_device *dev = connector->dev;
589         struct amdgpu_device *adev = drm_to_adev(dev);
590         struct dm_compressor_info *compressor = &adev->dm.compressor;
591         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
592         struct drm_display_mode *mode;
593         unsigned long max_size = 0;
594
595         if (adev->dm.dc->fbc_compressor == NULL)
596                 return;
597
598         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
599                 return;
600
601         if (compressor->bo_ptr)
602                 return;
603
604
605         list_for_each_entry(mode, &connector->modes, head) {
606                 if (max_size < mode->htotal * mode->vtotal)
607                         max_size = mode->htotal * mode->vtotal;
608         }
609
610         if (max_size) {
611                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
612                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
613                             &compressor->gpu_addr, &compressor->cpu_addr);
614
615                 if (r) {
616                         DRM_ERROR("DM: Failed to initialize FBC\n");
617                 } else {
618                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
619                         DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
620                 }
621
622         }
623
624 }
625
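/*
 * Audio component callback: hand the ELD (EDID-Like Data) block for the
 * requested audio pin to the HDA side. Returns the ELD size in bytes and
 * sets *enabled when a connector matching the pin is found.
 */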
626 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
627                                           int pipe, bool *enabled,
628                                           unsigned char *buf, int max_bytes)
629 {
630         struct drm_device *dev = dev_get_drvdata(kdev);
631         struct amdgpu_device *adev = drm_to_adev(dev);
632         struct drm_connector *connector;
633         struct drm_connector_list_iter conn_iter;
634         struct amdgpu_dm_connector *aconnector;
635         int ret = 0;
636
637         *enabled = false;
638
639         mutex_lock(&adev->dm.audio_lock);
640
641         drm_connector_list_iter_begin(dev, &conn_iter);
642         drm_for_each_connector_iter(connector, &conn_iter) {
643                 aconnector = to_amdgpu_dm_connector(connector);
644                 if (aconnector->audio_inst != port)
645                         continue;
646
647                 *enabled = true;
648                 ret = drm_eld_size(connector->eld);
649                 memcpy(buf, connector->eld, min(max_bytes, ret));
650
651                 break;
652         }
653         drm_connector_list_iter_end(&conn_iter);
654
655         mutex_unlock(&adev->dm.audio_lock);
656
657         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
658
659         return ret;
660 }
661
662 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
663         .get_eld = amdgpu_dm_audio_component_get_eld,
664 };
665
666 static int amdgpu_dm_audio_component_bind(struct device *kdev,
667                                        struct device *hda_kdev, void *data)
668 {
669         struct drm_device *dev = dev_get_drvdata(kdev);
670         struct amdgpu_device *adev = drm_to_adev(dev);
671         struct drm_audio_component *acomp = data;
672
673         acomp->ops = &amdgpu_dm_audio_component_ops;
674         acomp->dev = kdev;
675         adev->dm.audio_component = acomp;
676
677         return 0;
678 }
679
680 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
681                                           struct device *hda_kdev, void *data)
682 {
683         struct drm_device *dev = dev_get_drvdata(kdev);
684         struct amdgpu_device *adev = drm_to_adev(dev);
685         struct drm_audio_component *acomp = data;
686
687         acomp->ops = NULL;
688         acomp->dev = NULL;
689         adev->dm.audio_component = NULL;
690 }
691
692 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
693         .bind   = amdgpu_dm_audio_component_bind,
694         .unbind = amdgpu_dm_audio_component_unbind,
695 };
696
697 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
698 {
699         int i, ret;
700
701         if (!amdgpu_audio)
702                 return 0;
703
704         adev->mode_info.audio.enabled = true;
705
706         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
707
708         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
709                 adev->mode_info.audio.pin[i].channels = -1;
710                 adev->mode_info.audio.pin[i].rate = -1;
711                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
712                 adev->mode_info.audio.pin[i].status_bits = 0;
713                 adev->mode_info.audio.pin[i].category_code = 0;
714                 adev->mode_info.audio.pin[i].connected = false;
715                 adev->mode_info.audio.pin[i].id =
716                         adev->dm.dc->res_pool->audios[i]->inst;
717                 adev->mode_info.audio.pin[i].offset = 0;
718         }
719
720         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
721         if (ret < 0)
722                 return ret;
723
724         adev->dm.audio_registered = true;
725
726         return 0;
727 }
728
729 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
730 {
731         if (!amdgpu_audio)
732                 return;
733
734         if (!adev->mode_info.audio.enabled)
735                 return;
736
737         if (adev->dm.audio_registered) {
738                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
739                 adev->dm.audio_registered = false;
740         }
741
742         /* TODO: Disable audio? */
743
744         adev->mode_info.audio.enabled = false;
745 }
746
747 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
748 {
749         struct drm_audio_component *acomp = adev->dm.audio_component;
750
751         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
752                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
753
754                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
755                                                  pin, -1);
756         }
757 }
758
759 static int dm_dmub_hw_init(struct amdgpu_device *adev)
760 {
761         const struct dmcub_firmware_header_v1_0 *hdr;
762         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
763         struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
764         const struct firmware *dmub_fw = adev->dm.dmub_fw;
765         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
766         struct abm *abm = adev->dm.dc->res_pool->abm;
767         struct dmub_srv_hw_params hw_params;
768         enum dmub_status status;
769         const unsigned char *fw_inst_const, *fw_bss_data;
770         uint32_t i, fw_inst_const_size, fw_bss_data_size;
771         bool has_hw_support;
772
773         if (!dmub_srv)
774                 /* DMUB isn't supported on the ASIC. */
775                 return 0;
776
777         if (!fb_info) {
778                 DRM_ERROR("No framebuffer info for DMUB service.\n");
779                 return -EINVAL;
780         }
781
782         if (!dmub_fw) {
783                 /* Firmware required for DMUB support. */
784                 DRM_ERROR("No firmware provided for DMUB.\n");
785                 return -EINVAL;
786         }
787
788         status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
789         if (status != DMUB_STATUS_OK) {
790                 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
791                 return -EINVAL;
792         }
793
794         if (!has_hw_support) {
795                 DRM_INFO("DMUB unsupported on ASIC\n");
796                 return 0;
797         }
798
799         hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
800
801         fw_inst_const = dmub_fw->data +
802                         le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
803                         PSP_HEADER_BYTES;
804
805         fw_bss_data = dmub_fw->data +
806                       le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
807                       le32_to_cpu(hdr->inst_const_bytes);
808
809         /* Copy firmware and bios info into FB memory. */
810         fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
811                              PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
812
813         fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
814
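        /*
         * Layout of the DMUB firmware blob, as consumed here (a sketch based
         * on the offsets above):
         *
         *   [PSP header (0x100)][inst const][PSP footer (0x100)][BSS/data]
         *
         * fw_inst_const points just past the PSP header, while fw_bss_data
         * starts after the whole inst_const region, header and footer
         * included, which is why both are subtracted from the size.
         */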
815         /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
816          * amdgpu_ucode_init_single_fw will load dmub firmware
817          * fw_inst_const part to cw0; otherwise, the firmware back door load
818          * will be done by dm_dmub_hw_init
819          */
820         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
821                 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
822                                 fw_inst_const_size);
823         }
824
825         if (fw_bss_data_size)
826                 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
827                        fw_bss_data, fw_bss_data_size);
828
829         /* Copy firmware bios info into FB memory. */
830         memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
831                adev->bios_size);
832
833         /* Reset regions that need to be reset. */
834         memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
835                fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
836
837         memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
838                fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
839
840         memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
841                fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
842
843         /* Initialize hardware. */
844         memset(&hw_params, 0, sizeof(hw_params));
845         hw_params.fb_base = adev->gmc.fb_start;
846         hw_params.fb_offset = adev->gmc.aper_base;
847
848         /* backdoor load firmware and trigger dmub running */
849         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
850                 hw_params.load_inst_const = true;
851
852         if (dmcu)
853                 hw_params.psp_version = dmcu->psp_version;
854
855         for (i = 0; i < fb_info->num_fb; ++i)
856                 hw_params.fb[i] = &fb_info->fb[i];
857
858         status = dmub_srv_hw_init(dmub_srv, &hw_params);
859         if (status != DMUB_STATUS_OK) {
860                 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
861                 return -EINVAL;
862         }
863
864         /* Wait for firmware load to finish. */
865         status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
866         if (status != DMUB_STATUS_OK)
867                 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
868
869         /* Init DMCU and ABM if available. */
870         if (dmcu && abm) {
871                 dmcu->funcs->dmcu_init(dmcu);
872                 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
873         }
874
875         adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
876         if (!adev->dm.dc->ctx->dmub_srv) {
877                 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
878                 return -ENOMEM;
879         }
880
881         DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
882                  adev->dm.dmcub_fw_version);
883
884         return 0;
885 }
886
887 #if defined(CONFIG_DRM_AMD_DC_DCN)
888 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
889 {
890         uint64_t pt_base;
891         uint32_t logical_addr_low;
892         uint32_t logical_addr_high;
893         uint32_t agp_base, agp_bot, agp_top;
894         PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
895
896         logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
897         pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
898
899         if (adev->apu_flags & AMD_APU_IS_RAVEN2)
900                 /*
901                  * Raven2 has a HW issue preventing it from using VRAM that lies
902                  * above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, raise
903                  * the system aperture high address by 1 to avoid the VM fault
904                  * and hardware hang.
905                  */
906                 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
907         else
908                 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
909
910         agp_base = 0;
911         agp_bot = adev->gmc.agp_start >> 24;
912         agp_top = adev->gmc.agp_end >> 24;
913
914
915         page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
916         page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
917         page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
918         page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
919         page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
920         page_table_base.low_part = lower_32_bits(pt_base);
921
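        /*
         * The GART addresses above are packed as 4 KiB page-frame numbers:
         * low_part holds bits 43:12 and high_part bits 47:44. For example, a
         * gart_start of 0x100000000 (4 GiB) yields high_part = 0x0 and
         * low_part = 0x100000. The system aperture bounds below are kept in
         * 256 KiB units (>> 18) and the AGP window in 16 MiB units (>> 24).
         */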
922         pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
923         pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
924
925         pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
926         pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
927         pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
928
929         pa_config->system_aperture.fb_base = adev->gmc.fb_start;
930         pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
931         pa_config->system_aperture.fb_top = adev->gmc.fb_end;
932
933         pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
934         pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
935         pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
936
937         pa_config->is_hvm_enabled = 0;
938
939 }
940 #endif
941
942 static int amdgpu_dm_init(struct amdgpu_device *adev)
943 {
944         struct dc_init_data init_data;
945 #ifdef CONFIG_DRM_AMD_DC_HDCP
946         struct dc_callback_init init_params;
947 #endif
948         int r;
949
950         adev->dm.ddev = adev_to_drm(adev);
951         adev->dm.adev = adev;
952
953         /* Zero all the fields */
954         memset(&init_data, 0, sizeof(init_data));
955 #ifdef CONFIG_DRM_AMD_DC_HDCP
956         memset(&init_params, 0, sizeof(init_params));
957 #endif
958
959         mutex_init(&adev->dm.dc_lock);
960         mutex_init(&adev->dm.audio_lock);
961
962         if (amdgpu_dm_irq_init(adev)) {
963                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
964                 goto error;
965         }
966
967         init_data.asic_id.chip_family = adev->family;
968
969         init_data.asic_id.pci_revision_id = adev->pdev->revision;
970         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
971
972         init_data.asic_id.vram_width = adev->gmc.vram_width;
973         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
974         init_data.asic_id.atombios_base_address =
975                 adev->mode_info.atom_context->bios;
976
977         init_data.driver = adev;
978
979         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
980
981         if (!adev->dm.cgs_device) {
982                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
983                 goto error;
984         }
985
986         init_data.cgs_device = adev->dm.cgs_device;
987
988         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
989
990         switch (adev->asic_type) {
991         case CHIP_CARRIZO:
992         case CHIP_STONEY:
993         case CHIP_RAVEN:
994         case CHIP_RENOIR:
995                 init_data.flags.gpu_vm_support = true;
996                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
997                         init_data.flags.disable_dmcu = true;
998                 break;
999 #if defined(CONFIG_DRM_AMD_DC_DCN)
1000         case CHIP_VANGOGH:
1001                 init_data.flags.gpu_vm_support = true;
1002                 break;
1003 #endif
1004         default:
1005                 break;
1006         }
1007
1008         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1009                 init_data.flags.fbc_support = true;
1010
1011         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1012                 init_data.flags.multi_mon_pp_mclk_switch = true;
1013
1014         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1015                 init_data.flags.disable_fractional_pwm = true;
1016
1017         init_data.flags.power_down_display_on_boot = true;
1018
1019         init_data.soc_bounding_box = adev->dm.soc_bounding_box;
1020
1021         /* Display Core create. */
1022         adev->dm.dc = dc_create(&init_data);
1023
1024         if (adev->dm.dc) {
1025                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1026         } else {
1027                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1028                 goto error;
1029         }
1030
1031         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1032                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1033                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1034         }
1035
1036         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1037                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1038
1039         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1040                 adev->dm.dc->debug.disable_stutter = true;
1041
1042         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1043                 adev->dm.dc->debug.disable_dsc = true;
1044
1045         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1046                 adev->dm.dc->debug.disable_clock_gate = true;
1047
1048         r = dm_dmub_hw_init(adev);
1049         if (r) {
1050                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1051                 goto error;
1052         }
1053
1054         dc_hardware_init(adev->dm.dc);
1055
1056 #if defined(CONFIG_DRM_AMD_DC_DCN)
1057         if (adev->apu_flags) {
1058                 struct dc_phy_addr_space_config pa_config;
1059
1060                 mmhub_read_system_context(adev, &pa_config);
1061
1062                 /* Call the DC init_memory func */
1063                 dc_setup_system_context(adev->dm.dc, &pa_config);
1064         }
1065 #endif
1066
1067         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1068         if (!adev->dm.freesync_module) {
1069                 DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1070         } else {
1071                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1072                                  adev->dm.freesync_module);
1073         }
1074
1075         amdgpu_dm_init_color_mod();
1076
1077 #ifdef CONFIG_DRM_AMD_DC_HDCP
1078         if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1079                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1080
1081                 if (!adev->dm.hdcp_workqueue)
1082                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1083                 else
1084                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1085
1086                 dc_init_callbacks(adev->dm.dc, &init_params);
1087         }
1088 #endif
1089         if (amdgpu_dm_initialize_drm_device(adev)) {
1090                 DRM_ERROR(
1091                         "amdgpu: failed to initialize sw for display support.\n");
1092                 goto error;
1093         }
1094
1095         /* create fake encoders for MST */
1096         dm_dp_create_fake_mst_encoders(adev);
1097
1098         /* TODO: Add_display_info? */
1099
1100         /* TODO use dynamic cursor width */
1101         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1102         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1103
1104         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1105                 DRM_ERROR(
1106                         "amdgpu: failed to initialize vblank support.\n");
1107                 goto error;
1108         }
1109
1110
1111         DRM_DEBUG_DRIVER("KMS initialized.\n");
1112
1113         return 0;
1114 error:
1115         amdgpu_dm_fini(adev);
1116
1117         return -EINVAL;
1118 }
1119
1120 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1121 {
1122         int i;
1123
1124         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1125                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1126         }
1127
1128         amdgpu_dm_audio_fini(adev);
1129
1130         amdgpu_dm_destroy_drm_device(&adev->dm);
1131
1132 #ifdef CONFIG_DRM_AMD_DC_HDCP
1133         if (adev->dm.hdcp_workqueue) {
1134                 hdcp_destroy(adev->dm.hdcp_workqueue);
1135                 adev->dm.hdcp_workqueue = NULL;
1136         }
1137
1138         if (adev->dm.dc)
1139                 dc_deinit_callbacks(adev->dm.dc);
1140 #endif
1141         if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
1142                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1143                 adev->dm.dc->ctx->dmub_srv = NULL;
1144         }
1145
1146         if (adev->dm.dmub_bo)
1147                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1148                                       &adev->dm.dmub_bo_gpu_addr,
1149                                       &adev->dm.dmub_bo_cpu_addr);
1150
1151         /* DC Destroy TODO: Replace destroy DAL */
1152         if (adev->dm.dc)
1153                 dc_destroy(&adev->dm.dc);
1154         /*
1155          * TODO: pageflip, vblank interrupt
1156          *
1157          * amdgpu_dm_irq_fini(adev);
1158          */
1159
1160         if (adev->dm.cgs_device) {
1161                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1162                 adev->dm.cgs_device = NULL;
1163         }
1164         if (adev->dm.freesync_module) {
1165                 mod_freesync_destroy(adev->dm.freesync_module);
1166                 adev->dm.freesync_module = NULL;
1167         }
1168
1169         mutex_destroy(&adev->dm.audio_lock);
1170         mutex_destroy(&adev->dm.dc_lock);
1171
1172         return;
1173 }
1174
1175 static int load_dmcu_fw(struct amdgpu_device *adev)
1176 {
1177         const char *fw_name_dmcu = NULL;
1178         int r;
1179         const struct dmcu_firmware_header_v1_0 *hdr;
1180
1181         switch (adev->asic_type) {
1182 #if defined(CONFIG_DRM_AMD_DC_SI)
1183         case CHIP_TAHITI:
1184         case CHIP_PITCAIRN:
1185         case CHIP_VERDE:
1186         case CHIP_OLAND:
1187 #endif
1188         case CHIP_BONAIRE:
1189         case CHIP_HAWAII:
1190         case CHIP_KAVERI:
1191         case CHIP_KABINI:
1192         case CHIP_MULLINS:
1193         case CHIP_TONGA:
1194         case CHIP_FIJI:
1195         case CHIP_CARRIZO:
1196         case CHIP_STONEY:
1197         case CHIP_POLARIS11:
1198         case CHIP_POLARIS10:
1199         case CHIP_POLARIS12:
1200         case CHIP_VEGAM:
1201         case CHIP_VEGA10:
1202         case CHIP_VEGA12:
1203         case CHIP_VEGA20:
1204         case CHIP_NAVI10:
1205         case CHIP_NAVI14:
1206         case CHIP_RENOIR:
1207         case CHIP_SIENNA_CICHLID:
1208         case CHIP_NAVY_FLOUNDER:
1209         case CHIP_DIMGREY_CAVEFISH:
1210         case CHIP_VANGOGH:
1211                 return 0;
1212         case CHIP_NAVI12:
1213                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1214                 break;
1215         case CHIP_RAVEN:
1216                 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1217                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1218                 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1219                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1220                 else
1221                         return 0;
1222                 break;
1223         default:
1224                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1225                 return -EINVAL;
1226         }
1227
1228         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1229                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1230                 return 0;
1231         }
1232
1233         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1234         if (r == -ENOENT) {
1235                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1236                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1237                 adev->dm.fw_dmcu = NULL;
1238                 return 0;
1239         }
1240         if (r) {
1241                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1242                         fw_name_dmcu);
1243                 return r;
1244         }
1245
1246         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1247         if (r) {
1248                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1249                         fw_name_dmcu);
1250                 release_firmware(adev->dm.fw_dmcu);
1251                 adev->dm.fw_dmcu = NULL;
1252                 return r;
1253         }
1254
1255         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
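        /*
         * The DMCU image is registered as two PSP ucode entries: the ERAM
         * program (total size minus the interrupt-vector region) and the
         * interrupt vectors themselves, each padded to a page boundary.
         */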
1256         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1257         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1258         adev->firmware.fw_size +=
1259                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1260
1261         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1262         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1263         adev->firmware.fw_size +=
1264                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1265
1266         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1267
1268         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1269
1270         return 0;
1271 }
1272
1273 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1274 {
1275         struct amdgpu_device *adev = ctx;
1276
1277         return dm_read_reg(adev->dm.dc->ctx, address);
1278 }
1279
1280 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1281                                      uint32_t value)
1282 {
1283         struct amdgpu_device *adev = ctx;
1284
1285         return dm_write_reg(adev->dm.dc->ctx, address, value);
1286 }
1287
1288 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1289 {
1290         struct dmub_srv_create_params create_params;
1291         struct dmub_srv_region_params region_params;
1292         struct dmub_srv_region_info region_info;
1293         struct dmub_srv_fb_params fb_params;
1294         struct dmub_srv_fb_info *fb_info;
1295         struct dmub_srv *dmub_srv;
1296         const struct dmcub_firmware_header_v1_0 *hdr;
1297         const char *fw_name_dmub;
1298         enum dmub_asic dmub_asic;
1299         enum dmub_status status;
1300         int r;
1301
1302         switch (adev->asic_type) {
1303         case CHIP_RENOIR:
1304                 dmub_asic = DMUB_ASIC_DCN21;
1305                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1306                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1307                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1308                 break;
1309         case CHIP_SIENNA_CICHLID:
1310                 dmub_asic = DMUB_ASIC_DCN30;
1311                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1312                 break;
1313         case CHIP_NAVY_FLOUNDER:
1314                 dmub_asic = DMUB_ASIC_DCN30;
1315                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1316                 break;
1317         case CHIP_VANGOGH:
1318                 dmub_asic = DMUB_ASIC_DCN301;
1319                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1320                 break;
1321         case CHIP_DIMGREY_CAVEFISH:
1322                 dmub_asic = DMUB_ASIC_DCN302;
1323                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1324                 break;
1325
1326         default:
1327                 /* ASIC doesn't support DMUB. */
1328                 return 0;
1329         }
1330
1331         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1332         if (r) {
1333                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1334                 return 0;
1335         }
1336
1337         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1338         if (r) {
1339                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1340                 return 0;
1341         }
1342
1343         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1344         /* Record the version before it is logged below. */
1345         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1346
1347         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1348                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1349                         AMDGPU_UCODE_ID_DMCUB;
1350                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1351                         adev->dm.dmub_fw;
1352                 adev->firmware.fw_size +=
1353                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1354
1355                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1356                          adev->dm.dmcub_fw_version);
1357         }
1358
1359         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1360         dmub_srv = adev->dm.dmub_srv;
1361
1362         if (!dmub_srv) {
1363                 DRM_ERROR("Failed to allocate DMUB service!\n");
1364                 return -ENOMEM;
1365         }
1366
1367         memset(&create_params, 0, sizeof(create_params));
1368         create_params.user_ctx = adev;
1369         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1370         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1371         create_params.asic = dmub_asic;
1372
1373         /* Create the DMUB service. */
1374         status = dmub_srv_create(dmub_srv, &create_params);
1375         if (status != DMUB_STATUS_OK) {
1376                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1377                 return -EINVAL;
1378         }
1379
1380         /* Calculate the size of all the regions for the DMUB service. */
1381         memset(&region_params, 0, sizeof(region_params));
1382
1383         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1384                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1385         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1386         region_params.vbios_size = adev->bios_size;
1387         region_params.fw_bss_data = region_params.bss_data_size ?
1388                 adev->dm.dmub_fw->data +
1389                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1390                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1391         region_params.fw_inst_const =
1392                 adev->dm.dmub_fw->data +
1393                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1394                 PSP_HEADER_BYTES;
1395
1396         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1397                                            &region_info);
1398
1399         if (status != DMUB_STATUS_OK) {
1400                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1401                 return -EINVAL;
1402         }
1403
1404         /*
1405          * Allocate a framebuffer based on the total size of all the regions.
1406          * TODO: Move this into GART.
1407          */
1408         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1409                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1410                                     &adev->dm.dmub_bo_gpu_addr,
1411                                     &adev->dm.dmub_bo_cpu_addr);
1412         if (r)
1413                 return r;
1414
1415         /* Rebase the regions on the framebuffer address. */
1416         memset(&fb_params, 0, sizeof(fb_params));
1417         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1418         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1419         fb_params.region_info = &region_info;
1420
1421         adev->dm.dmub_fb_info =
1422                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1423         fb_info = adev->dm.dmub_fb_info;
1424
1425         if (!fb_info) {
1426                 DRM_ERROR(
1427                         "Failed to allocate framebuffer info for DMUB service!\n");
1428                 return -ENOMEM;
1429         }
1430
1431         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1432         if (status != DMUB_STATUS_OK) {
1433                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1434                 return -EINVAL;
1435         }
1436
1437         return 0;
1438 }
1439
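/*
 * dm_sw_init() - IP block software init hook: bring up the DMUB service
 * and request the DMCU firmware.
 */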
1440 static int dm_sw_init(void *handle)
1441 {
1442         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1443         int r;
1444
1445         r = dm_dmub_sw_init(adev);
1446         if (r)
1447                 return r;
1448
1449         return load_dmcu_fw(adev);
1450 }
1451
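/*
 * dm_sw_fini() - IP block software teardown hook: release everything
 * dm_sw_init() set up (DMUB service, framebuffer info and firmware).
 */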
1452 static int dm_sw_fini(void *handle)
1453 {
1454         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1455
1456         kfree(adev->dm.dmub_fb_info);
1457         adev->dm.dmub_fb_info = NULL;
1458
1459         if (adev->dm.dmub_srv) {
1460                 dmub_srv_destroy(adev->dm.dmub_srv);
1461                 adev->dm.dmub_srv = NULL;
1462         }
1463
1464         release_firmware(adev->dm.dmub_fw);
1465         adev->dm.dmub_fw = NULL;
1466
1467         release_firmware(adev->dm.fw_dmcu);
1468         adev->dm.fw_dmcu = NULL;
1469
1470         return 0;
1471 }
1472
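/*
 * Start MST topology management on every connector whose link is an MST
 * branch with a valid AUX channel; a link that fails to start is demoted
 * to a single-stream connection.
 */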
1473 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1474 {
1475         struct amdgpu_dm_connector *aconnector;
1476         struct drm_connector *connector;
1477         struct drm_connector_list_iter iter;
1478         int ret = 0;
1479
1480         drm_connector_list_iter_begin(dev, &iter);
1481         drm_for_each_connector_iter(connector, &iter) {
1482                 aconnector = to_amdgpu_dm_connector(connector);
1483                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1484                     aconnector->mst_mgr.aux) {
1485                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1486                                          aconnector,
1487                                          aconnector->base.base.id);
1488
1489                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1490                         if (ret < 0) {
1491                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1492                                 aconnector->dc_link->type =
1493                                         dc_connection_single;
1494                                 break;
1495                         }
1496                 }
1497         }
1498         drm_connector_list_iter_end(&iter);
1499
1500         return ret;
1501 }
1502
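/*
 * dm_late_init() - program the ABM parameters into IRAM (via DMCU, or
 * DMUB where ABM lives in DMCUB firmware) and kick off MST detection.
 */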
1503 static int dm_late_init(void *handle)
1504 {
1505         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1506
1507         struct dmcu_iram_parameters params;
1508         unsigned int linear_lut[16];
1509         int i;
1510         struct dmcu *dmcu = NULL;
1511         bool ret = true;
1512
1513         dmcu = adev->dm.dc->res_pool->dmcu;
1514
1515         for (i = 0; i < 16; i++)
1516                 linear_lut[i] = 0xFFFF * i / 15;
1517
1518         params.set = 0;
1519         params.backlight_ramping_start = 0xCCCC;
1520         params.backlight_ramping_reduction = 0xCCCCCCCC;
1521         params.backlight_lut_array_size = 16;
1522         params.backlight_lut_array = linear_lut;
1523
        /*
         * Min backlight level after ABM reduction. Don't allow the level to
         * go below 1%: 0xFFFF * 0.01 = 0x28F.
         */
1527         params.min_abm_backlight = 0x28F;
1528
        /*
         * Where ABM is implemented in DMCUB firmware the dmcu object will be
         * NULL; ABM 2.4 and up are implemented in DMCUB.
         */
1533         if (dmcu)
1534                 ret = dmcu_load_iram(dmcu, params);
1535         else if (adev->dm.dc->ctx->dmub_srv)
1536                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1537
1538         if (!ret)
1539                 return -EINVAL;
1540
1541         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1542 }
1543
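/*
 * Suspend or resume the MST topology managers for S3. A manager that
 * fails to resume has MST torn down on its link, and a hotplug event is
 * generated so userspace can re-probe the topology.
 */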
1544 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1545 {
1546         struct amdgpu_dm_connector *aconnector;
1547         struct drm_connector *connector;
1548         struct drm_connector_list_iter iter;
1549         struct drm_dp_mst_topology_mgr *mgr;
1550         int ret;
1551         bool need_hotplug = false;
1552
1553         drm_connector_list_iter_begin(dev, &iter);
1554         drm_for_each_connector_iter(connector, &iter) {
1555                 aconnector = to_amdgpu_dm_connector(connector);
1556                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1557                     aconnector->mst_port)
1558                         continue;
1559
1560                 mgr = &aconnector->mst_mgr;
1561
1562                 if (suspend) {
1563                         drm_dp_mst_topology_mgr_suspend(mgr);
1564                 } else {
1565                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1566                         if (ret < 0) {
1567                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1568                                 need_hotplug = true;
1569                         }
1570                 }
1571         }
1572         drm_connector_list_iter_end(&iter);
1573
1574         if (need_hotplug)
1575                 drm_kms_helper_hotplug_event(dev);
1576 }
1577
1578 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1579 {
1580         struct smu_context *smu = &adev->smu;
1581         int ret = 0;
1582
1583         if (!is_support_sw_smu(adev))
1584                 return 0;
1585
        /*
         * This interface is for dGPU Navi1x. The Linux dc-pplib interface
         * depends on the Windows driver's dc implementation.
         * For Navi1x, the clock settings of the dcn watermarks are fixed and
         * should be passed to smu during boot up and on resume from S3.
         * Boot up: dc calculates the dcn watermark clock settings within
         * dc_create and dcn20_resource_construct, then calls the pplib
         * functions below to pass the settings to smu:
         * smu_set_watermarks_for_clock_ranges
         * smu_set_watermarks_table
         * navi10_set_watermarks_table
         * smu_write_watermarks_table
         *
         * For Renoir, the clock settings of the dcn watermarks are also
         * fixed values. dc has implemented a different flow for the Windows
         * driver:
         * dc_hardware_init / dc_set_power_state
         * dcn10_init_hw
         * notify_wm_ranges
         * set_wm_ranges
         * -- Linux
         * smu_set_watermarks_for_clock_ranges
         * renoir_set_watermarks_table
         * smu_write_watermarks_table
         *
         * For Linux,
         * dc_hardware_init -> amdgpu_dm_init
         * dc_set_power_state --> dm_resume
         *
         * Therefore, this function applies to Navi10/12/14 but not to
         * Renoir.
         */
        switch (adev->asic_type) {
1617         case CHIP_NAVI10:
1618         case CHIP_NAVI14:
1619         case CHIP_NAVI12:
1620                 break;
1621         default:
1622                 return 0;
1623         }
1624
1625         ret = smu_write_watermarks_table(smu);
1626         if (ret) {
1627                 DRM_ERROR("Failed to update WMTABLE!\n");
1628                 return ret;
1629         }
1630
1631         return 0;
1632 }
1633
1634 /**
1635  * dm_hw_init() - Initialize DC device
1636  * @handle: The base driver device containing the amdgpu_dm device.
1637  *
1638  * Initialize the &struct amdgpu_display_manager device. This involves calling
1639  * the initializers of each DM component, then populating the struct with them.
1640  *
1641  * Although the function implies hardware initialization, both hardware and
1642  * software are initialized here. Splitting them out to their relevant init
1643  * hooks is a future TODO item.
1644  *
1645  * Some notable things that are initialized here:
1646  *
1647  * - Display Core, both software and hardware
1648  * - DC modules that we need (freesync and color management)
1649  * - DRM software states
1650  * - Interrupt sources and handlers
1651  * - Vblank support
1652  * - Debug FS entries, if enabled
1653  */
1654 static int dm_hw_init(void *handle)
1655 {
1656         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1657         /* Create DAL display manager */
1658         amdgpu_dm_init(adev);
1659         amdgpu_dm_hpd_init(adev);
1660
1661         return 0;
1662 }
1663
1664 /**
1665  * dm_hw_fini() - Teardown DC device
1666  * @handle: The base driver device containing the amdgpu_dm device.
1667  *
1668  * Teardown components within &struct amdgpu_display_manager that require
1669  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1670  * were loaded. Also flush IRQ workqueues and disable them.
1671  */
1672 static int dm_hw_fini(void *handle)
1673 {
1674         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1675
1676         amdgpu_dm_hpd_fini(adev);
1677
1678         amdgpu_dm_irq_fini(adev);
1679         amdgpu_dm_fini(adev);
1680         return 0;
}

1684 static int dm_enable_vblank(struct drm_crtc *crtc);
1685 static void dm_disable_vblank(struct drm_crtc *crtc);
1686
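/*
 * Enable or disable the pflip and vblank interrupts of every active CRTC
 * in @state; used to quiesce and later restore display interrupts around
 * a GPU reset.
 */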
1687 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1688                                  struct dc_state *state, bool enable)
1689 {
1690         enum dc_irq_source irq_source;
1691         struct amdgpu_crtc *acrtc;
1692         int rc = -EBUSY;
1693         int i = 0;
1694
1695         for (i = 0; i < state->stream_count; i++) {
1696                 acrtc = get_crtc_by_otg_inst(
1697                                 adev, state->stream_status[i].primary_otg_inst);
1698
1699                 if (acrtc && state->stream_status[i].plane_count != 0) {
1700                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1701                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
                        DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
                                  acrtc->crtc_id, enable ? "en" : "dis", rc);
1704                         if (rc)
1705                                 DRM_WARN("Failed to %s pflip interrupts\n",
1706                                          enable ? "enable" : "disable");
1707
1708                         if (enable) {
1709                                 rc = dm_enable_vblank(&acrtc->base);
1710                                 if (rc)
1711                                         DRM_WARN("Failed to enable vblank interrupts\n");
1712                         } else {
1713                                 dm_disable_vblank(&acrtc->base);
1714                         }
                }
        }
}
1720
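/*
 * Commit a copy of the current context with every stream (and its
 * planes) removed, i.e. blank all pipes; used on the GPU reset path.
 */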
1721 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1722 {
1723         struct dc_state *context = NULL;
1724         enum dc_status res = DC_ERROR_UNEXPECTED;
1725         int i;
1726         struct dc_stream_state *del_streams[MAX_PIPES];
1727         int del_streams_count = 0;
1728
1729         memset(del_streams, 0, sizeof(del_streams));
1730
1731         context = dc_create_state(dc);
1732         if (context == NULL)
1733                 goto context_alloc_fail;
1734
1735         dc_resource_state_copy_construct_current(dc, context);
1736
1737         /* First remove from context all streams */
1738         for (i = 0; i < context->stream_count; i++) {
1739                 struct dc_stream_state *stream = context->streams[i];
1740
1741                 del_streams[del_streams_count++] = stream;
1742         }
1743
1744         /* Remove all planes for removed streams and then remove the streams */
1745         for (i = 0; i < del_streams_count; i++) {
1746                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1747                         res = DC_FAIL_DETACH_SURFACES;
1748                         goto fail;
1749                 }
1750
1751                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1752                 if (res != DC_OK)
1753                         goto fail;
        }

1757         res = dc_validate_global_state(dc, context, false);
1758
1759         if (res != DC_OK) {
1760                 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1761                 goto fail;
1762         }
1763
1764         res = dc_commit_state(dc, context);
1765
1766 fail:
1767         dc_release_state(context);
1768
1769 context_alloc_fail:
1770         return res;
1771 }
1772
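/*
 * dm_suspend() - IP block suspend hook.
 *
 * On the GPU reset path the current dc_state is cached, interrupts are
 * disabled and dc_lock is taken; the lock is released again in
 * dm_resume() once the cached state has been restored.
 */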
1773 static int dm_suspend(void *handle)
1774 {
1775         struct amdgpu_device *adev = handle;
1776         struct amdgpu_display_manager *dm = &adev->dm;
1777         int ret = 0;
1778
1779         if (amdgpu_in_reset(adev)) {
1780                 mutex_lock(&dm->dc_lock);
1781                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1782
1783                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1784
1785                 amdgpu_dm_commit_zero_streams(dm->dc);
1786
1787                 amdgpu_dm_irq_suspend(adev);
1788
1789                 return ret;
1790         }
1791
1792         WARN_ON(adev->dm.cached_state);
1793         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1794
1795         s3_handle_mst(adev_to_drm(adev), true);
1796
        amdgpu_dm_irq_suspend(adev);

1800         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1801
1802         return 0;
1803 }
1804
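/* Return the first connector in @state whose new state targets @crtc. */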
1805 static struct amdgpu_dm_connector *
1806 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1807                                              struct drm_crtc *crtc)
1808 {
1809         uint32_t i;
1810         struct drm_connector_state *new_con_state;
1811         struct drm_connector *connector;
1812         struct drm_crtc *crtc_from_state;
1813
1814         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1815                 crtc_from_state = new_con_state->crtc;
1816
1817                 if (crtc_from_state == crtc)
1818                         return to_amdgpu_dm_connector(connector);
1819         }
1820
1821         return NULL;
1822 }
1823
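/*
 * Emulate link detection for forced connectors: build a sink matching
 * the connector's signal type and read the EDID locally instead of
 * touching the (absent) physical link.
 */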
1824 static void emulated_link_detect(struct dc_link *link)
1825 {
1826         struct dc_sink_init_data sink_init_data = { 0 };
1827         struct display_sink_capability sink_caps = { 0 };
1828         enum dc_edid_status edid_status;
1829         struct dc_context *dc_ctx = link->ctx;
1830         struct dc_sink *sink = NULL;
1831         struct dc_sink *prev_sink = NULL;
1832
1833         link->type = dc_connection_none;
1834         prev_sink = link->local_sink;
1835
1836         if (prev_sink != NULL)
1837                 dc_sink_retain(prev_sink);
1838
1839         switch (link->connector_signal) {
1840         case SIGNAL_TYPE_HDMI_TYPE_A: {
1841                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1842                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1843                 break;
1844         }
1845
1846         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1847                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1848                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1849                 break;
1850         }
1851
1852         case SIGNAL_TYPE_DVI_DUAL_LINK: {
1853                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1854                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1855                 break;
1856         }
1857
1858         case SIGNAL_TYPE_LVDS: {
1859                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1860                 sink_caps.signal = SIGNAL_TYPE_LVDS;
1861                 break;
1862         }
1863
1864         case SIGNAL_TYPE_EDP: {
1865                 sink_caps.transaction_type =
1866                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1867                 sink_caps.signal = SIGNAL_TYPE_EDP;
1868                 break;
1869         }
1870
1871         case SIGNAL_TYPE_DISPLAY_PORT: {
1872                 sink_caps.transaction_type =
1873                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
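                /* Emulated DP sinks are created with the virtual signal type. */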
1874                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1875                 break;
1876         }
1877
1878         default:
1879                 DC_ERROR("Invalid connector type! signal:%d\n",
1880                         link->connector_signal);
1881                 return;
1882         }
1883
1884         sink_init_data.link = link;
1885         sink_init_data.sink_signal = sink_caps.signal;
1886
1887         sink = dc_sink_create(&sink_init_data);
1888         if (!sink) {
1889                 DC_ERROR("Failed to create sink!\n");
1890                 return;
1891         }
1892
1893         /* dc_sink_create returns a new reference */
1894         link->local_sink = sink;
1895
1896         edid_status = dm_helpers_read_local_edid(
1897                         link->ctx,
1898                         link,
1899                         sink);
1900
        if (edid_status != EDID_OK)
                DC_ERROR("Failed to read EDID\n");
}
1905
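/*
 * Recommit every stream and plane of a cached dc_state after a GPU
 * reset, forcing full surface updates so the hardware is fully
 * reprogrammed.
 */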
1906 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1907                                      struct amdgpu_display_manager *dm)
1908 {
1909         struct {
1910                 struct dc_surface_update surface_updates[MAX_SURFACES];
1911                 struct dc_plane_info plane_infos[MAX_SURFACES];
1912                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1913                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1914                 struct dc_stream_update stream_update;
        } *bundle;
1916         int k, m;
1917
1918         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1919
1920         if (!bundle) {
1921                 dm_error("Failed to allocate update bundle\n");
1922                 goto cleanup;
1923         }
1924
1925         for (k = 0; k < dc_state->stream_count; k++) {
1926                 bundle->stream_update.stream = dc_state->streams[k];
1927
                for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
                        bundle->surface_updates[m].surface =
                                dc_state->stream_status[k].plane_states[m];
                        bundle->surface_updates[m].surface->force_full_update =
                                true;
                }
                dc_commit_updates_for_stream(
                        dm->dc, bundle->surface_updates,
                        dc_state->stream_status[k].plane_count,
                        dc_state->streams[k], &bundle->stream_update, dc_state);
1938         }
1939
1940 cleanup:
        kfree(bundle);
1944 }
1945
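/* Find the stream driven by @link and commit a dpms_off update to it. */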
1946 static void dm_set_dpms_off(struct dc_link *link)
1947 {
1948         struct dc_stream_state *stream_state;
1949         struct amdgpu_dm_connector *aconnector = link->priv;
1950         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1951         struct dc_stream_update stream_update;
1952         bool dpms_off = true;
1953
1954         memset(&stream_update, 0, sizeof(stream_update));
1955         stream_update.dpms_off = &dpms_off;
1956
1957         mutex_lock(&adev->dm.dc_lock);
1958         stream_state = dc_stream_find_from_link(link);
1959
1960         if (stream_state == NULL) {
1961                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
1962                 mutex_unlock(&adev->dm.dc_lock);
1963                 return;
1964         }
1965
1966         stream_update.stream = stream_state;
1967         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
1968                                      stream_state, &stream_update,
1969                                      stream_state->ctx->dc->current_state);
1970         mutex_unlock(&adev->dm.dc_lock);
1971 }
1972
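/*
 * dm_resume() - IP block resume hook.
 *
 * On the GPU reset path the dc_state cached by dm_suspend() is
 * recommitted and dc_lock is released. On the S3 path, DMUB and DC are
 * re-initialized, all links are re-detected and the atomic state cached
 * on suspend is restored.
 */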
1973 static int dm_resume(void *handle)
1974 {
1975         struct amdgpu_device *adev = handle;
1976         struct drm_device *ddev = adev_to_drm(adev);
1977         struct amdgpu_display_manager *dm = &adev->dm;
1978         struct amdgpu_dm_connector *aconnector;
1979         struct drm_connector *connector;
1980         struct drm_connector_list_iter iter;
1981         struct drm_crtc *crtc;
1982         struct drm_crtc_state *new_crtc_state;
1983         struct dm_crtc_state *dm_new_crtc_state;
1984         struct drm_plane *plane;
1985         struct drm_plane_state *new_plane_state;
1986         struct dm_plane_state *dm_new_plane_state;
1987         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1988         enum dc_connection_type new_connection_type = dc_connection_none;
1989         struct dc_state *dc_state;
1990         int i, r, j;
1991
1992         if (amdgpu_in_reset(adev)) {
1993                 dc_state = dm->cached_dc_state;
1994
1995                 r = dm_dmub_hw_init(adev);
1996                 if (r)
1997                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1998
1999                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2000                 dc_resume(dm->dc);
2001
2002                 amdgpu_dm_irq_resume_early(adev);
2003
                for (i = 0; i < dc_state->stream_count; i++) {
                        dc_state->streams[i]->mode_changed = true;
                        for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
                                dc_state->stream_status[i].plane_states[j]->update_flags.raw
                                        = 0xffffffff;
                        }
                }
2011
2012                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2013
2014                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2015
2016                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2017
2018                 dc_release_state(dm->cached_dc_state);
2019                 dm->cached_dc_state = NULL;
2020
2021                 amdgpu_dm_irq_resume_late(adev);
2022
2023                 mutex_unlock(&dm->dc_lock);
2024
2025                 return 0;
2026         }
2027         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2028         dc_release_state(dm_state->context);
2029         dm_state->context = dc_create_state(dm->dc);
2030         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2031         dc_resource_state_construct(dm->dc, dm_state->context);
2032
2033         /* Before powering on DC we need to re-initialize DMUB. */
2034         r = dm_dmub_hw_init(adev);
2035         if (r)
2036                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2037
2038         /* power on hardware */
2039         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2040
2041         /* program HPD filter */
2042         dc_resume(dm->dc);
2043
        /*
         * Enable HPD Rx IRQ early; this must be done before the mode is set,
         * since short-pulse interrupts are used for MST.
         */
2048         amdgpu_dm_irq_resume_early(adev);
2049
        /* On resume we need to rewrite the MSTM control bits to enable MST. */
2051         s3_handle_mst(ddev, false);
2052
        /* Do detection. */
2054         drm_connector_list_iter_begin(ddev, &iter);
2055         drm_for_each_connector_iter(connector, &iter) {
2056                 aconnector = to_amdgpu_dm_connector(connector);
2057
                /*
                 * Skip already-created MST connectors encountered while
                 * traversing the list; they are handled by the MST framework.
                 */
2062                 if (aconnector->mst_port)
2063                         continue;
2064
2065                 mutex_lock(&aconnector->hpd_lock);
2066                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2067                         DRM_ERROR("KMS: Failed to detect connector\n");
2068
2069                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2070                         emulated_link_detect(aconnector->dc_link);
2071                 else
2072                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2073
2074                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2075                         aconnector->fake_enable = false;
2076
2077                 if (aconnector->dc_sink)
2078                         dc_sink_release(aconnector->dc_sink);
2079                 aconnector->dc_sink = NULL;
2080                 amdgpu_dm_update_connector_after_detect(aconnector);
2081                 mutex_unlock(&aconnector->hpd_lock);
2082         }
2083         drm_connector_list_iter_end(&iter);
2084
2085         /* Force mode set in atomic commit */
2086         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2087                 new_crtc_state->active_changed = true;
2088
2089         /*
2090          * atomic_check is expected to create the dc states. We need to release
2091          * them here, since they were duplicated as part of the suspend
2092          * procedure.
2093          */
2094         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2095                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2096                 if (dm_new_crtc_state->stream) {
2097                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2098                         dc_stream_release(dm_new_crtc_state->stream);
2099                         dm_new_crtc_state->stream = NULL;
2100                 }
2101         }
2102
2103         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2104                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2105                 if (dm_new_plane_state->dc_state) {
2106                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2107                         dc_plane_state_release(dm_new_plane_state->dc_state);
2108                         dm_new_plane_state->dc_state = NULL;
2109                 }
2110         }
2111
2112         drm_atomic_helper_resume(ddev, dm->cached_state);
2113
2114         dm->cached_state = NULL;
2115
2116         amdgpu_dm_irq_resume_late(adev);
2117
2118         amdgpu_dm_smu_write_watermarks_table(adev);
2119
2120         return 0;
2121 }
2122
2123 /**
2124  * DOC: DM Lifecycle
2125  *
2126  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2127  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2128  * the base driver's device list to be initialized and torn down accordingly.
2129  *
2130  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2131  */
2132
2133 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2134         .name = "dm",
2135         .early_init = dm_early_init,
2136         .late_init = dm_late_init,
2137         .sw_init = dm_sw_init,
2138         .sw_fini = dm_sw_fini,
2139         .hw_init = dm_hw_init,
2140         .hw_fini = dm_hw_fini,
2141         .suspend = dm_suspend,
2142         .resume = dm_resume,
2143         .is_idle = dm_is_idle,
2144         .wait_for_idle = dm_wait_for_idle,
2145         .check_soft_reset = dm_check_soft_reset,
2146         .soft_reset = dm_soft_reset,
2147         .set_clockgating_state = dm_set_clockgating_state,
2148         .set_powergating_state = dm_set_powergating_state,
2149 };
2150
const struct amdgpu_ip_block_version dm_ip_block = {
2153         .type = AMD_IP_BLOCK_TYPE_DCE,
2154         .major = 1,
2155         .minor = 0,
2156         .rev = 0,
2157         .funcs = &amdgpu_dm_funcs,
};

/**
 * DOC: atomic
 *
 * *WIP*
 *
 * DM implements the DRM atomic interface: amdgpu_dm_atomic_check()
 * validates a requested configuration and amdgpu_dm_atomic_commit_tail()
 * programs it into DC (see amdgpu_dm_mode_funcs and
 * amdgpu_dm_mode_config_helperfuncs below).
 */
2166
2167 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2168         .fb_create = amdgpu_display_user_framebuffer_create,
2169         .get_format_info = amd_get_format_info,
2170         .output_poll_changed = drm_fb_helper_output_poll_changed,
2171         .atomic_check = amdgpu_dm_atomic_check,
2172         .atomic_commit = drm_atomic_helper_commit,
2173 };
2174
2175 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2176         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2177 };
2178
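/*
 * Derive the eDP backlight capabilities (AUX control support, min/max
 * input signal) from the sink's DPCD extended caps and HDR metadata.
 */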
2179 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2180 {
2181         u32 max_cll, min_cll, max, min, q, r;
2182         struct amdgpu_dm_backlight_caps *caps;
2183         struct amdgpu_display_manager *dm;
2184         struct drm_connector *conn_base;
2185         struct amdgpu_device *adev;
2186         struct dc_link *link = NULL;
2187         static const u8 pre_computed_values[] = {
2188                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2189                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2190
2191         if (!aconnector || !aconnector->dc_link)
2192                 return;
2193
2194         link = aconnector->dc_link;
2195         if (link->connector_signal != SIGNAL_TYPE_EDP)
2196                 return;
2197
2198         conn_base = &aconnector->base;
2199         adev = drm_to_adev(conn_base->dev);
2200         dm = &adev->dm;
2201         caps = &dm->backlight_caps;
2202         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2203         caps->aux_support = false;
2204         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2205         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2206
2207         if (caps->ext_caps->bits.oled == 1 ||
2208             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2209             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2210                 caps->aux_support = true;
2211
        /* From the specification (CTA-861-G), the maximum luminance is
         * calculated as:
         *      Luminance = 50 * 2**(CV/32)
         * where CV is a one-byte value.
         * Evaluating this expression directly would need floating-point
         * precision; to avoid that complexity, we use the fact that CV is
         * divided by a constant. By Euclid's division algorithm, CV can be
         * written as CV = 32*q + r. Substituting this into the luminance
         * expression gives 50*(2**q)*(2**(r/32)), so only the values of
         * 2**(r/32) need to be pre-computed. The values were generated with
         * the following Ruby one-liner:
         *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
         * and the results can be verified against pre_computed_values.
         */
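        /*
         * For example, max_cll = 65 gives q = 2 and r = 1, so
         * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204, which
         * matches round(50 * 2**(65/32)) = 204.
         */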
2227         q = max_cll >> 5;
2228         r = max_cll % 32;
2229         max = (1 << q) * pre_computed_values[r];
2230
        /* min luminance: maxLum * (CV/255)^2 / 100. Keep the full product in
         * the numerator so that integer division cannot truncate it to zero.
         */
        min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2234
2235         caps->aux_max_input_signal = max;
2236         caps->aux_min_input_signal = min;
2237 }
2238
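/*
 * Sync the drm connector with the outcome of a link detection: update
 * the dc_sink, EDID, freesync caps and CEC state, and notify userspace
 * through the usual property updates.
 */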
2239 void amdgpu_dm_update_connector_after_detect(
2240                 struct amdgpu_dm_connector *aconnector)
2241 {
2242         struct drm_connector *connector = &aconnector->base;
2243         struct drm_device *dev = connector->dev;
2244         struct dc_sink *sink;
2245
2246         /* MST handled by drm_mst framework */
        if (aconnector->mst_mgr.mst_state)
2248                 return;
2249
2250         sink = aconnector->dc_link->local_sink;
2251         if (sink)
2252                 dc_sink_retain(sink);
2253
        /*
         * An EDID-managed connector gets its first update only in the
         * mode_valid hook; the connector sink is then set to either the fake
         * or the physical sink, depending on link status. Skip this if it
         * was already done during boot.
         */
2259         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2260                         && aconnector->dc_em_sink) {
2261
                /*
                 * For headless S3 resume, use the emulated sink (dc_em_sink)
                 * to fake a stream, because connector->sink is set to NULL on
                 * resume.
                 */
2266                 mutex_lock(&dev->mode_config.mutex);
2267
2268                 if (sink) {
2269                         if (aconnector->dc_sink) {
2270                                 amdgpu_dm_update_freesync_caps(connector, NULL);
                                /*
                                 * The retain and release below bump the sink
                                 * refcount: the link no longer points at the
                                 * sink after disconnect, so without this the
                                 * next crtc-to-connector reshuffle by the UMD
                                 * would trigger an unwanted dc_sink release.
                                 */
2277                                 dc_sink_release(aconnector->dc_sink);
2278                         }
2279                         aconnector->dc_sink = sink;
2280                         dc_sink_retain(aconnector->dc_sink);
2281                         amdgpu_dm_update_freesync_caps(connector,
2282                                         aconnector->edid);
2283                 } else {
2284                         amdgpu_dm_update_freesync_caps(connector, NULL);
2285                         if (!aconnector->dc_sink) {
2286                                 aconnector->dc_sink = aconnector->dc_em_sink;
2287                                 dc_sink_retain(aconnector->dc_sink);
2288                         }
2289                 }
2290
2291                 mutex_unlock(&dev->mode_config.mutex);
2292
2293                 if (sink)
2294                         dc_sink_release(sink);
2295                 return;
2296         }
2297
        /*
         * TODO: temporary guard until a proper fix is found.
         * If this sink is an MST sink, we should not do anything.
         */
2302         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2303                 dc_sink_release(sink);
2304                 return;
2305         }
2306
2307         if (aconnector->dc_sink == sink) {
2308                 /*
2309                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2310                  * Do nothing!!
2311                  */
2312                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2313                                 aconnector->connector_id);
2314                 if (sink)
2315                         dc_sink_release(sink);
2316                 return;
2317         }
2318
2319         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2320                 aconnector->connector_id, aconnector->dc_sink, sink);
2321
2322         mutex_lock(&dev->mode_config.mutex);
2323
2324         /*
2325          * 1. Update status of the drm connector
2326          * 2. Send an event and let userspace tell us what to do
2327          */
2328         if (sink) {
2329                 /*
2330                  * TODO: check if we still need the S3 mode update workaround.
2331                  * If yes, put it here.
2332                  */
2333                 if (aconnector->dc_sink)
2334                         amdgpu_dm_update_freesync_caps(connector, NULL);
2335
2336                 aconnector->dc_sink = sink;
2337                 dc_sink_retain(aconnector->dc_sink);
2338                 if (sink->dc_edid.length == 0) {
2339                         aconnector->edid = NULL;
2340                         if (aconnector->dc_link->aux_mode) {
2341                                 drm_dp_cec_unset_edid(
2342                                         &aconnector->dm_dp_aux.aux);
2343                         }
2344                 } else {
2345                         aconnector->edid =
2346                                 (struct edid *)sink->dc_edid.raw_edid;
2347
2348                         drm_connector_update_edid_property(connector,
2349                                                            aconnector->edid);
2350                         drm_add_edid_modes(connector, aconnector->edid);
2351
2352                         if (aconnector->dc_link->aux_mode)
2353                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2354                                                     aconnector->edid);
2355                 }
2356
2357                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2358                 update_connector_ext_caps(aconnector);
2359         } else {
2360                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2361                 amdgpu_dm_update_freesync_caps(connector, NULL);
2362                 drm_connector_update_edid_property(connector, NULL);
2363                 aconnector->num_modes = 0;
2364                 dc_sink_release(aconnector->dc_sink);
2365                 aconnector->dc_sink = NULL;
2366                 aconnector->edid = NULL;
2367 #ifdef CONFIG_DRM_AMD_DC_HDCP
2368                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2369                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2370                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2371 #endif
2372         }
2373
2374         mutex_unlock(&dev->mode_config.mutex);
2375
2376         update_subconnector_property(aconnector);
2377
2378         if (sink)
2379                 dc_sink_release(sink);
2380 }
2381
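/* Low-IRQ-context handler for long HPD pulses (connect/disconnect). */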
2382 static void handle_hpd_irq(void *param)
2383 {
2384         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2385         struct drm_connector *connector = &aconnector->base;
2386         struct drm_device *dev = connector->dev;
2387         enum dc_connection_type new_connection_type = dc_connection_none;
2388 #ifdef CONFIG_DRM_AMD_DC_HDCP
2389         struct amdgpu_device *adev = drm_to_adev(dev);
2390         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2391 #endif
2392
        /*
         * In case of failure or MST there is no need to update the connector
         * status or notify the OS, since (in the MST case) MST does this in
         * its own context.
         */
2397         mutex_lock(&aconnector->hpd_lock);
2398
2399 #ifdef CONFIG_DRM_AMD_DC_HDCP
2400         if (adev->dm.hdcp_workqueue) {
2401                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2402                 dm_con_state->update_hdcp = true;
2403         }
2404 #endif
2405         if (aconnector->fake_enable)
2406                 aconnector->fake_enable = false;
2407
2408         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2409                 DRM_ERROR("KMS: Failed to detect connector\n");
2410
2411         if (aconnector->base.force && new_connection_type == dc_connection_none) {
                emulated_link_detect(aconnector->dc_link);

2415                 drm_modeset_lock_all(dev);
2416                 dm_restore_drm_connector_state(dev, connector);
2417                 drm_modeset_unlock_all(dev);
2418
2419                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2420                         drm_kms_helper_hotplug_event(dev);
2421
2422         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2423                 if (new_connection_type == dc_connection_none &&
2424                     aconnector->dc_link->type == dc_connection_none)
2425                         dm_set_dpms_off(aconnector->dc_link);
2426
2427                 amdgpu_dm_update_connector_after_detect(aconnector);
2428
2429                 drm_modeset_lock_all(dev);
2430                 dm_restore_drm_connector_state(dev, connector);
2431                 drm_modeset_unlock_all(dev);
2432
2433                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2434                         drm_kms_helper_hotplug_event(dev);
2435         }
2436         mutex_unlock(&aconnector->hpd_lock);
}
2439
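/*
 * Handle an MST down/up request: read the ESI bytes, hand them to the
 * MST manager, ACK back through DPCD, and loop while new IRQs are
 * pending (bounded by max_process_count).
 */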
2440 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2441 {
2442         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2443         uint8_t dret;
2444         bool new_irq_handled = false;
2445         int dpcd_addr;
2446         int dpcd_bytes_to_read;
2447
2448         const int max_process_count = 30;
2449         int process_count = 0;
2450
2451         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2452
2453         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2454                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2455                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2456                 dpcd_addr = DP_SINK_COUNT;
2457         } else {
2458                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2459                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2460                 dpcd_addr = DP_SINK_COUNT_ESI;
2461         }
2462
2463         dret = drm_dp_dpcd_read(
2464                 &aconnector->dm_dp_aux.aux,
2465                 dpcd_addr,
2466                 esi,
2467                 dpcd_bytes_to_read);
2468
2469         while (dret == dpcd_bytes_to_read &&
2470                 process_count < max_process_count) {
2471                 uint8_t retry;
2472                 dret = 0;
2473
2474                 process_count++;
2475
2476                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2477                 /* handle HPD short pulse irq */
2478                 if (aconnector->mst_mgr.mst_state)
2479                         drm_dp_mst_hpd_irq(
2480                                 &aconnector->mst_mgr,
2481                                 esi,
2482                                 &new_irq_handled);
2483
2484                 if (new_irq_handled) {
                        /* ACK at DPCD to notify downstream */
2486                         const int ack_dpcd_bytes_to_write =
2487                                 dpcd_bytes_to_read - 1;
2488
2489                         for (retry = 0; retry < 3; retry++) {
2490                                 uint8_t wret;
2491
2492                                 wret = drm_dp_dpcd_write(
2493                                         &aconnector->dm_dp_aux.aux,
2494                                         dpcd_addr + 1,
2495                                         &esi[1],
2496                                         ack_dpcd_bytes_to_write);
2497                                 if (wret == ack_dpcd_bytes_to_write)
2498                                         break;
2499                         }
2500
2501                         /* check if there is new irq to be handled */
2502                         dret = drm_dp_dpcd_read(
2503                                 &aconnector->dm_dp_aux.aux,
2504                                 dpcd_addr,
2505                                 esi,
2506                                 dpcd_bytes_to_read);
2507
2508                         new_irq_handled = false;
2509                 } else {
2510                         break;
2511                 }
2512         }
2513
2514         if (process_count == max_process_count)
2515                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2516 }
2517
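/*
 * Low-IRQ-context handler for DP short pulses (HPD Rx): MST up/down
 * requests, link-loss recovery and, with HDCP enabled, CP_IRQ handling.
 */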
2518 static void handle_hpd_rx_irq(void *param)
2519 {
2520         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2521         struct drm_connector *connector = &aconnector->base;
2522         struct drm_device *dev = connector->dev;
2523         struct dc_link *dc_link = aconnector->dc_link;
2524         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2525         bool result = false;
2526         enum dc_connection_type new_connection_type = dc_connection_none;
2527         struct amdgpu_device *adev = drm_to_adev(dev);
2528         union hpd_irq_data hpd_irq_data;
2529
2530         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2531
        /*
         * TODO: Temporarily add a mutex to protect the HPD interrupt from
         * GPIO conflicts; once an i2c helper is implemented, this mutex
         * should be retired.
         */
2537         if (dc_link->type != dc_connection_mst_branch)
2538                 mutex_lock(&aconnector->hpd_lock);
2539
2540         read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2541
2542         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2543                 (dc_link->type == dc_connection_mst_branch)) {
2544                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2545                         result = true;
2546                         dm_handle_hpd_rx_irq(aconnector);
2547                         goto out;
2548                 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2549                         result = false;
2550                         dm_handle_hpd_rx_irq(aconnector);
2551                         goto out;
2552                 }
2553         }
2554
2555         mutex_lock(&adev->dm.dc_lock);
2556 #ifdef CONFIG_DRM_AMD_DC_HDCP
2557         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2558 #else
2559         result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2560 #endif
2561         mutex_unlock(&adev->dm.dc_lock);
2562
2563 out:
2564         if (result && !is_mst_root_connector) {
2565                 /* Downstream Port status changed. */
2566                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2567                         DRM_ERROR("KMS: Failed to detect connector\n");
2568
2569                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2570                         emulated_link_detect(dc_link);
2571
2572                         if (aconnector->fake_enable)
2573                                 aconnector->fake_enable = false;
2574
                        amdgpu_dm_update_connector_after_detect(aconnector);

2578                         drm_modeset_lock_all(dev);
2579                         dm_restore_drm_connector_state(dev, connector);
2580                         drm_modeset_unlock_all(dev);
2581
2582                         drm_kms_helper_hotplug_event(dev);
2583                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2584
2585                         if (aconnector->fake_enable)
2586                                 aconnector->fake_enable = false;
2587
                        amdgpu_dm_update_connector_after_detect(aconnector);

2591                         drm_modeset_lock_all(dev);
2592                         dm_restore_drm_connector_state(dev, connector);
2593                         drm_modeset_unlock_all(dev);
2594
2595                         drm_kms_helper_hotplug_event(dev);
2596                 }
2597         }
2598 #ifdef CONFIG_DRM_AMD_DC_HDCP
2599         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2600                 if (adev->dm.hdcp_workqueue)
                        hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2602         }
2603 #endif
2604
2605         if (dc_link->type != dc_connection_mst_branch) {
2606                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2607                 mutex_unlock(&aconnector->hpd_lock);
2608         }
2609 }
2610
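/*
 * Register handle_hpd_irq()/handle_hpd_rx_irq() for every connector
 * that exposes valid HPD and HPD RX interrupt sources.
 */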
2611 static void register_hpd_handlers(struct amdgpu_device *adev)
2612 {
2613         struct drm_device *dev = adev_to_drm(adev);
2614         struct drm_connector *connector;
2615         struct amdgpu_dm_connector *aconnector;
2616         const struct dc_link *dc_link;
2617         struct dc_interrupt_params int_params = {0};
2618
2619         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2620         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2621
2622         list_for_each_entry(connector,
2623                         &dev->mode_config.connector_list, head) {
2624
2625                 aconnector = to_amdgpu_dm_connector(connector);
2626                 dc_link = aconnector->dc_link;
2627
2628                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2629                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2630                         int_params.irq_source = dc_link->irq_source_hpd;
2631
2632                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2633                                         handle_hpd_irq,
2634                                         (void *) aconnector);
2635                 }
2636
2637                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2638
2639                         /* Also register for DP short pulse (hpd_rx). */
2640                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2641                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2642
2643                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2644                                         handle_hpd_rx_irq,
2645                                         (void *) aconnector);
2646                 }
2647         }
2648 }
2649
2650 #if defined(CONFIG_DRM_AMD_DC_SI)
2651 /* Register IRQ sources and initialize IRQ callbacks */
2652 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2653 {
2654         struct dc *dc = adev->dm.dc;
2655         struct common_irq_params *c_irq_params;
2656         struct dc_interrupt_params int_params = {0};
2657         int r;
2658         int i;
        unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2660
2661         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2662         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2663
        /*
         * Actions of amdgpu_irq_add_id():
         * 1. Register a set() function with base driver.
         *    Base driver will call set() function to enable/disable an
         *    interrupt in DC hardware.
         * 2. Register amdgpu_dm_irq_handler().
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
         *    for acknowledging and handling.
         */
2674
2675         /* Use VBLANK interrupt */
2676         for (i = 0; i < adev->mode_info.num_crtc; i++) {
                r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2678                 if (r) {
2679                         DRM_ERROR("Failed to add crtc irq id!\n");
2680                         return r;
2681                 }
2682
2683                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2684                 int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i + 1, 0);
2686
2687                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2688
2689                 c_irq_params->adev = adev;
2690                 c_irq_params->irq_src = int_params.irq_source;
2691
2692                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2693                                 dm_crtc_high_irq, c_irq_params);
2694         }
2695
2696         /* Use GRPH_PFLIP interrupt */
2697         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2698                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2699                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2700                 if (r) {
2701                         DRM_ERROR("Failed to add page flip irq id!\n");
2702                         return r;
2703                 }
2704
2705                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2706                 int_params.irq_source =
2707                         dc_interrupt_to_irq_source(dc, i, 0);
2708
2709                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2710
2711                 c_irq_params->adev = adev;
2712                 c_irq_params->irq_src = int_params.irq_source;
2713
2714                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2715                                 dm_pflip_high_irq, c_irq_params);
        }
2718
2719         /* HPD */
2720         r = amdgpu_irq_add_id(adev, client_id,
2721                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2722         if (r) {
2723                 DRM_ERROR("Failed to add hpd irq id!\n");
2724                 return r;
2725         }
2726
2727         register_hpd_handlers(adev);
2728
2729         return 0;
2730 }
2731 #endif
2732
2733 /* Register IRQ sources and initialize IRQ callbacks */
2734 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2735 {
2736         struct dc *dc = adev->dm.dc;
2737         struct common_irq_params *c_irq_params;
2738         struct dc_interrupt_params int_params = {0};
2739         int r;
2740         int i;
        unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2742
2743         if (adev->asic_type >= CHIP_VEGA10)
2744                 client_id = SOC15_IH_CLIENTID_DCE;
2745
2746         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2747         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2748
        /*
         * Actions of amdgpu_irq_add_id():
         * 1. Register a set() function with base driver.
         *    Base driver will call set() function to enable/disable an
         *    interrupt in DC hardware.
         * 2. Register amdgpu_dm_irq_handler().
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
         *    for acknowledging and handling.
         */
2759
2760         /* Use VBLANK interrupt */
2761         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2762                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2763                 if (r) {
2764                         DRM_ERROR("Failed to add crtc irq id!\n");
2765                         return r;
2766                 }
2767
2768                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2769                 int_params.irq_source =
2770                         dc_interrupt_to_irq_source(dc, i, 0);
2771
2772                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2773
2774                 c_irq_params->adev = adev;
2775                 c_irq_params->irq_src = int_params.irq_source;
2776
2777                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2778                                 dm_crtc_high_irq, c_irq_params);
2779         }
2780
2781         /* Use VUPDATE interrupt */
2782         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2783                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2784                 if (r) {
2785                         DRM_ERROR("Failed to add vupdate irq id!\n");
2786                         return r;
2787                 }
2788
2789                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2790                 int_params.irq_source =
2791                         dc_interrupt_to_irq_source(dc, i, 0);
2792
2793                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2794
2795                 c_irq_params->adev = adev;
2796                 c_irq_params->irq_src = int_params.irq_source;
2797
2798                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2799                                 dm_vupdate_high_irq, c_irq_params);
2800         }
2801
2802         /* Use GRPH_PFLIP interrupt */
2803         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2804                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2805                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2806                 if (r) {
2807                         DRM_ERROR("Failed to add page flip irq id!\n");
2808                         return r;
2809                 }
2810
2811                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2812                 int_params.irq_source =
2813                         dc_interrupt_to_irq_source(dc, i, 0);
2814
2815                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2816
2817                 c_irq_params->adev = adev;
2818                 c_irq_params->irq_src = int_params.irq_source;
2819
2820                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2821                                 dm_pflip_high_irq, c_irq_params);
2823         }
2824
2825         /* HPD */
2826         r = amdgpu_irq_add_id(adev, client_id,
2827                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2828         if (r) {
2829                 DRM_ERROR("Failed to add hpd irq id!\n");
2830                 return r;
2831         }
2832
2833         register_hpd_handlers(adev);
2834
2835         return 0;
2836 }
2837
2838 #if defined(CONFIG_DRM_AMD_DC_DCN)
2839 /* Register IRQ sources and initialize IRQ callbacks */
2840 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2841 {
2842         struct dc *dc = adev->dm.dc;
2843         struct common_irq_params *c_irq_params;
2844         struct dc_interrupt_params int_params = {0};
2845         int r;
2846         int i;
2847
2848         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2849         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2850
2851         /*
2852          * Actions of amdgpu_irq_add_id():
2853          * 1. Register a set() function with base driver.
2854          *    Base driver will call set() function to enable/disable an
2855          *    interrupt in DC hardware.
2856          * 2. Register amdgpu_dm_irq_handler().
2857          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2858          *    coming from DC hardware.
2859          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2860          *    for acknowledging and handling.
2861          */
2862
2863         /* Use VSTARTUP interrupt */
2864         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2865                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2866                         i++) {
2867                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2868
2869                 if (r) {
2870                         DRM_ERROR("Failed to add crtc irq id!\n");
2871                         return r;
2872                 }
2873
2874                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2875                 int_params.irq_source =
2876                         dc_interrupt_to_irq_source(dc, i, 0);
2877
2878                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2879
2880                 c_irq_params->adev = adev;
2881                 c_irq_params->irq_src = int_params.irq_source;
2882
2883                 amdgpu_dm_irq_register_interrupt(
2884                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
2885         }
2886
2887         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2888          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2889          * to trigger at the end of each vblank, regardless of the state of the lock,
2890          * matching DCE behaviour.
2891          */
2892         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2893              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2894              i++) {
2895                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2896
2897                 if (r) {
2898                         DRM_ERROR("Failed to add vupdate irq id!\n");
2899                         return r;
2900                 }
2901
2902                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2903                 int_params.irq_source =
2904                         dc_interrupt_to_irq_source(dc, i, 0);
2905
2906                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2907
2908                 c_irq_params->adev = adev;
2909                 c_irq_params->irq_src = int_params.irq_source;
2910
2911                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2912                                 dm_vupdate_high_irq, c_irq_params);
2913         }
2914
2915         /* Use GRPH_PFLIP interrupt */
2916         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2917                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2918                         i++) {
2919                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2920                 if (r) {
2921                         DRM_ERROR("Failed to add page flip irq id!\n");
2922                         return r;
2923                 }
2924
2925                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2926                 int_params.irq_source =
2927                         dc_interrupt_to_irq_source(dc, i, 0);
2928
2929                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2930
2931                 c_irq_params->adev = adev;
2932                 c_irq_params->irq_src = int_params.irq_source;
2933
2934                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2935                                 dm_pflip_high_irq, c_irq_params);
2937         }
2938
2939         /* HPD */
2940         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2941                         &adev->hpd_irq);
2942         if (r) {
2943                 DRM_ERROR("Failed to add hpd irq id!\n");
2944                 return r;
2945         }
2946
2947         register_hpd_handlers(adev);
2948
2949         return 0;
2950 }
2951 #endif
2952
2953 /*
2954  * Acquires the lock for the atomic state object and returns
2955  * the new atomic state.
2956  *
2957  * This should only be called during atomic check.
2958  */
2959 static int dm_atomic_get_state(struct drm_atomic_state *state,
2960                                struct dm_atomic_state **dm_state)
2961 {
2962         struct drm_device *dev = state->dev;
2963         struct amdgpu_device *adev = drm_to_adev(dev);
2964         struct amdgpu_display_manager *dm = &adev->dm;
2965         struct drm_private_state *priv_state;
2966
2967         if (*dm_state)
2968                 return 0;
2969
2970         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2971         if (IS_ERR(priv_state))
2972                 return PTR_ERR(priv_state);
2973
2974         *dm_state = to_dm_atomic_state(priv_state);
2975
2976         return 0;
2977 }
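
/*
 * Illustrative usage sketch (hypothetical caller, not code in this file):
 * check-path code keeps a NULL-initialized pointer and fetches the global
 * DM state lazily, so the private object (and its lock) is only touched
 * when a commit actually needs cross-CRTC state:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret;
 *
 *	ret = dm_atomic_get_state(state, &dm_state);
 *	if (ret)
 *		return ret;
 *
 * On success, dm_state->context may then be modified for this atomic check.
 */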
2978
2979 static struct dm_atomic_state *
2980 dm_atomic_get_new_state(struct drm_atomic_state *state)
2981 {
2982         struct drm_device *dev = state->dev;
2983         struct amdgpu_device *adev = drm_to_adev(dev);
2984         struct amdgpu_display_manager *dm = &adev->dm;
2985         struct drm_private_obj *obj;
2986         struct drm_private_state *new_obj_state;
2987         int i;
2988
2989         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2990                 if (obj->funcs == dm->atomic_obj.funcs)
2991                         return to_dm_atomic_state(new_obj_state);
2992         }
2993
2994         return NULL;
2995 }
2996
2997 static struct drm_private_state *
2998 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2999 {
3000         struct dm_atomic_state *old_state, *new_state;
3001
3002         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3003         if (!new_state)
3004                 return NULL;
3005
3006         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3007
3008         old_state = to_dm_atomic_state(obj->state);
3009
3010         if (old_state && old_state->context)
3011                 new_state->context = dc_copy_state(old_state->context);
3012
3013         if (!new_state->context) {
3014                 kfree(new_state);
3015                 return NULL;
3016         }
3017
3018         return &new_state->base;
3019 }
3020
3021 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3022                                     struct drm_private_state *state)
3023 {
3024         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3025
3026         if (dm_state && dm_state->context)
3027                 dc_release_state(dm_state->context);
3028
3029         kfree(dm_state);
3030 }
3031
3032 static const struct drm_private_state_funcs dm_atomic_state_funcs = {
3033         .atomic_duplicate_state = dm_atomic_duplicate_state,
3034         .atomic_destroy_state = dm_atomic_destroy_state,
3035 };
3036
3037 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3038 {
3039         struct dm_atomic_state *state;
3040         int r;
3041
3042         adev->mode_info.mode_config_initialized = true;
3043
3044         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3045         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3046
3047         adev_to_drm(adev)->mode_config.max_width = 16384;
3048         adev_to_drm(adev)->mode_config.max_height = 16384;
3049
3050         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3051         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3052         /* indicates support for immediate flip */
3053         adev_to_drm(adev)->mode_config.async_page_flip = true;
3054
3055         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3056
3057         state = kzalloc(sizeof(*state), GFP_KERNEL);
3058         if (!state)
3059                 return -ENOMEM;
3060
3061         state->context = dc_create_state(adev->dm.dc);
3062         if (!state->context) {
3063                 kfree(state);
3064                 return -ENOMEM;
3065         }
3066
3067         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3068
3069         drm_atomic_private_obj_init(adev_to_drm(adev),
3070                                     &adev->dm.atomic_obj,
3071                                     &state->base,
3072                                     &dm_atomic_state_funcs);
3073
3074         r = amdgpu_display_modeset_create_props(adev);
3075         if (r) {
3076                 dc_release_state(state->context);
3077                 kfree(state);
3078                 return r;
3079         }
3080
3081         r = amdgpu_dm_audio_init(adev);
3082         if (r) {
3083                 dc_release_state(state->context);
3084                 kfree(state);
3085                 return r;
3086         }
3087
3088         return 0;
3089 }
3090
3091 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3092 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3093 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3094
3095 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3096         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3097
3098 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3099 {
3100 #if defined(CONFIG_ACPI)
3101         struct amdgpu_dm_backlight_caps caps;
3102
3103         memset(&caps, 0, sizeof(caps));
3104
3105         if (dm->backlight_caps.caps_valid)
3106                 return;
3107
3108         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3109         if (caps.caps_valid) {
3110                 dm->backlight_caps.caps_valid = true;
3111                 if (caps.aux_support)
3112                         return;
3113                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3114                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3115         } else {
3116                 dm->backlight_caps.min_input_signal =
3117                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3118                 dm->backlight_caps.max_input_signal =
3119                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3120         }
3121 #else
3122         if (dm->backlight_caps.aux_support)
3123                 return;
3124
3125         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3126         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3127 #endif
3128 }
3129
3130 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3131 {
3132         bool rc;
3133
3134         if (!link)
3135                 return 1;
3136
3137         rc = dc_link_set_backlight_level_nits(link, true, brightness,
3138                                               AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3139
3140         return rc ? 0 : 1;
3141 }
3142
3143 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3144                                 unsigned int *min, unsigned int *max)
3145 {
3146         if (!caps)
3147                 return 0;
3148
3149         if (caps->aux_support) {
3150                 // Firmware limits are in nits, DC API wants millinits.
3151                 *max = 1000 * caps->aux_max_input_signal;
3152                 *min = 1000 * caps->aux_min_input_signal;
3153         } else {
3154                 // Firmware limits are 8-bit, PWM control is 16-bit; 0x101 scales 0xFF up to 0xFFFF.
3155                 *max = 0x101 * caps->max_input_signal;
3156                 *min = 0x101 * caps->min_input_signal;
3157         }
3158         return 1;
3159 }
3160
3161 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3162                                         uint32_t brightness)
3163 {
3164         unsigned int min, max;
3165
3166         if (!get_brightness_range(caps, &min, &max))
3167                 return brightness;
3168
3169         // Rescale 0..255 to min..max
3170         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3171                                        AMDGPU_MAX_BL_LEVEL);
3172 }
3173
3174 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3175                                       uint32_t brightness)
3176 {
3177         unsigned int min, max;
3178
3179         if (!get_brightness_range(caps, &min, &max))
3180                 return brightness;
3181
3182         if (brightness < min)
3183                 return 0;
3184         // Rescale min..max to 0..255
3185         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3186                                  max - min);
3187 }
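
/*
 * Worked example for the PWM path, using the default caps above
 * (illustrative numbers only): min_input_signal = 12 and
 * max_input_signal = 255 give min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 65535. A user brightness of 128 then maps to
 * 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 34432, and feeding 34432
 * back through convert_brightness_to_user() rounds to 128, so the two
 * conversions round-trip cleanly across the 0..AMDGPU_MAX_BL_LEVEL scale.
 */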
3188
3189 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3190 {
3191         struct amdgpu_display_manager *dm = bl_get_data(bd);
3192         struct amdgpu_dm_backlight_caps caps;
3193         struct dc_link *link = NULL;
3194         u32 brightness;
3195         bool rc;
3196
3197         amdgpu_dm_update_backlight_caps(dm);
3198         caps = dm->backlight_caps;
3199
3200         link = (struct dc_link *)dm->backlight_link;
3201
3202         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3203         // Change brightness based on AUX property
3204         if (caps.aux_support)
3205                 return set_backlight_via_aux(link, brightness);
3206
3207         rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3208
3209         return rc ? 0 : 1;
3210 }
3211
3212 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3213 {
3214         struct amdgpu_display_manager *dm = bl_get_data(bd);
3215         int ret = dc_link_get_backlight_level(dm->backlight_link);
3216
3217         if (ret == DC_ERROR_UNEXPECTED)
3218                 return bd->props.brightness;
3219         return convert_brightness_to_user(&dm->backlight_caps, ret);
3220 }
3221
3222 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3223         .options = BL_CORE_SUSPENDRESUME,
3224         .get_brightness = amdgpu_dm_backlight_get_brightness,
3225         .update_status  = amdgpu_dm_backlight_update_status,
3226 };
3227
3228 static void
3229 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3230 {
3231         char bl_name[16];
3232         struct backlight_properties props = { 0 };
3233
3234         amdgpu_dm_update_backlight_caps(dm);
3235
3236         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3237         props.brightness = AMDGPU_MAX_BL_LEVEL;
3238         props.type = BACKLIGHT_RAW;
3239
3240         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3241                  adev_to_drm(dm->adev)->primary->index);
3242
3243         dm->backlight_dev = backlight_device_register(bl_name,
3244                                                       adev_to_drm(dm->adev)->dev,
3245                                                       dm,
3246                                                       &amdgpu_dm_backlight_ops,
3247                                                       &props);
3248
3249         if (IS_ERR(dm->backlight_dev))
3250                 DRM_ERROR("DM: Backlight registration failed!\n");
3251         else
3252                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3253 }
3254
3255 #endif
3256
3257 static int initialize_plane(struct amdgpu_display_manager *dm,
3258                             struct amdgpu_mode_info *mode_info, int plane_id,
3259                             enum drm_plane_type plane_type,
3260                             const struct dc_plane_cap *plane_cap)
3261 {
3262         struct drm_plane *plane;
3263         unsigned long possible_crtcs;
3264         int ret = 0;
3265
3266         plane = kzalloc(sizeof(*plane), GFP_KERNEL);
3267         if (!plane) {
3268                 DRM_ERROR("KMS: Failed to allocate plane\n");
3269                 return -ENOMEM;
3270         }
3271         plane->type = plane_type;
3272
3273         /*
3274          * HACK: IGT tests expect that the primary plane for a CRTC
3275          * can only have one possible CRTC. Only expose support for
3276          * any CRTC if they're not going to be used as a primary plane
3277          * for a CRTC - like overlay or underlay planes.
3278          */
3279         possible_crtcs = 1 << plane_id;
3280         if (plane_id >= dm->dc->caps.max_streams)
3281                 possible_crtcs = 0xff;
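        /*
         * E.g. the primary plane for CRTC 2 gets
         * possible_crtcs = 1 << 2 = 0x4, while an overlay plane
         * (plane_id >= max_streams) may be placed on any CRTC.
         */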
3282
3283         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3284
3285         if (ret) {
3286                 DRM_ERROR("KMS: Failed to initialize plane\n");
3287                 kfree(plane);
3288                 return ret;
3289         }
3290
3291         if (mode_info)
3292                 mode_info->planes[plane_id] = plane;
3293
3294         return ret;
3295 }
3296
3298 static void register_backlight_device(struct amdgpu_display_manager *dm,
3299                                       struct dc_link *link)
3300 {
3301 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3302         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3303
3304         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3305             link->type != dc_connection_none) {
3306                 /*
3307                  * Even if registration failed, we should continue with
3308                  * DM initialization because not having a backlight control
3309                  * is better than a black screen.
3310                  */
3311                 amdgpu_dm_register_backlight_device(dm);
3312
3313                 if (dm->backlight_dev)
3314                         dm->backlight_link = link;
3315         }
3316 #endif
3317 }
3318
3320 /*
3321  * In this architecture, the association
3322  * connector -> encoder -> crtc
3323  * is not really required. The crtc and connector will hold the
3324  * display_index as an abstraction to use with the DAL component.
3325  *
3326  * Returns 0 on success
3327  */
3328 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3329 {
3330         struct amdgpu_display_manager *dm = &adev->dm;
3331         int32_t i;
3332         struct amdgpu_dm_connector *aconnector = NULL;
3333         struct amdgpu_encoder *aencoder = NULL;
3334         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3335         uint32_t link_cnt;
3336         int32_t primary_planes;
3337         enum dc_connection_type new_connection_type = dc_connection_none;
3338         const struct dc_plane_cap *plane;
3339
3340         dm->display_indexes_num = dm->dc->caps.max_streams;
3341         /* Update the actual used number of crtc */
3342         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3343
3344         link_cnt = dm->dc->caps.max_links;
3345         if (amdgpu_dm_mode_config_init(dm->adev)) {
3346                 DRM_ERROR("DM: Failed to initialize mode config\n");
3347                 return -EINVAL;
3348         }
3349
3350         /* There is one primary plane per CRTC */
3351         primary_planes = dm->dc->caps.max_streams;
3352         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3353
3354         /*
3355          * Initialize primary planes, the implicit planes used by legacy IOCTLs.
3356          * Order is reversed to match iteration order in atomic check.
3357          */
3358         for (i = (primary_planes - 1); i >= 0; i--) {
3359                 plane = &dm->dc->caps.planes[i];
3360
3361                 if (initialize_plane(dm, mode_info, i,
3362                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3363                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3364                         goto fail;
3365                 }
3366         }
3367
3368         /*
3369          * Initialize overlay planes, index starting after primary planes.
3370          * These planes have a higher DRM index than the primary planes since
3371          * they should be considered as having a higher z-order.
3372          * Order is reversed to match iteration order in atomic check.
3373          *
3374          * Only support DCN for now, and only expose one so we don't encourage
3375          * userspace to use up all the pipes.
3376          */
3377         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3378                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3379
3380                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3381                         continue;
3382
3383                 if (!plane->blends_with_above || !plane->blends_with_below)
3384                         continue;
3385
3386                 if (!plane->pixel_format_support.argb8888)
3387                         continue;
3388
3389                 if (initialize_plane(dm, NULL, primary_planes + i,
3390                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3391                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3392                         goto fail;
3393                 }
3394
3395                 /* Only create one overlay plane. */
3396                 break;
3397         }
3398
3399         for (i = 0; i < dm->dc->caps.max_streams; i++)
3400                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3401                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3402                         goto fail;
3403                 }
3404
3405         /* Loop over all connectors on the board */
3406         for (i = 0; i < link_cnt; i++) {
3407                 struct dc_link *link = NULL;
3408
3409                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3410                         DRM_ERROR(
3411                                 "KMS: Cannot support more than %d display indexes\n",
3412                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3413                         continue;
3414                 }
3415
3416                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3417                 if (!aconnector)
3418                         goto fail;
3419
3420                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3421                 if (!aencoder)
3422                         goto fail;
3423
3424                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3425                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3426                         goto fail;
3427                 }
3428
3429                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3430                         DRM_ERROR("KMS: Failed to initialize connector\n");
3431                         goto fail;
3432                 }
3433
3434                 link = dc_get_link_at_index(dm->dc, i);
3435
3436                 if (!dc_link_detect_sink(link, &new_connection_type))
3437                         DRM_ERROR("KMS: Failed to detect connector\n");
3438
3439                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3440                         emulated_link_detect(link);
3441                         amdgpu_dm_update_connector_after_detect(aconnector);
3442
3443                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3444                         amdgpu_dm_update_connector_after_detect(aconnector);
3445                         register_backlight_device(dm, link);
3446                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3447                                 amdgpu_dm_set_psr_caps(link);
3448                 }
3451         }
3452
3453         /* Software is initialized. Now we can register interrupt handlers. */
3454         switch (adev->asic_type) {
3455 #if defined(CONFIG_DRM_AMD_DC_SI)
3456         case CHIP_TAHITI:
3457         case CHIP_PITCAIRN:
3458         case CHIP_VERDE:
3459         case CHIP_OLAND:
3460                 if (dce60_register_irq_handlers(dm->adev)) {
3461                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3462                         goto fail;
3463                 }
3464                 break;
3465 #endif
3466         case CHIP_BONAIRE:
3467         case CHIP_HAWAII:
3468         case CHIP_KAVERI:
3469         case CHIP_KABINI:
3470         case CHIP_MULLINS:
3471         case CHIP_TONGA:
3472         case CHIP_FIJI:
3473         case CHIP_CARRIZO:
3474         case CHIP_STONEY:
3475         case CHIP_POLARIS11:
3476         case CHIP_POLARIS10:
3477         case CHIP_POLARIS12:
3478         case CHIP_VEGAM:
3479         case CHIP_VEGA10:
3480         case CHIP_VEGA12:
3481         case CHIP_VEGA20:
3482                 if (dce110_register_irq_handlers(dm->adev)) {
3483                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3484                         goto fail;
3485                 }
3486                 break;
3487 #if defined(CONFIG_DRM_AMD_DC_DCN)
3488         case CHIP_RAVEN:
3489         case CHIP_NAVI12:
3490         case CHIP_NAVI10:
3491         case CHIP_NAVI14:
3492         case CHIP_RENOIR:
3493         case CHIP_SIENNA_CICHLID:
3494         case CHIP_NAVY_FLOUNDER:
3495         case CHIP_DIMGREY_CAVEFISH:
3496         case CHIP_VANGOGH:
3497                 if (dcn10_register_irq_handlers(dm->adev)) {
3498                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3499                         goto fail;
3500                 }
3501                 break;
3502 #endif
3503         default:
3504                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3505                 goto fail;
3506         }
3507
3508         return 0;
3509 fail:
3510         kfree(aencoder);
3511         kfree(aconnector);
3512
3513         return -EINVAL;
3514 }
3515
3516 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3517 {
3518         drm_mode_config_cleanup(dm->ddev);
3519         drm_atomic_private_obj_fini(&dm->atomic_obj);
3521 }
3522
3523 /******************************************************************************
3524  * amdgpu_display_funcs functions
3525  *****************************************************************************/
3526
3527 /*
3528  * dm_bandwidth_update - program display watermarks
3529  *
3530  * @adev: amdgpu_device pointer
3531  *
3532  * Calculate and program the display watermarks and line buffer allocation.
3533  */
3534 static void dm_bandwidth_update(struct amdgpu_device *adev)
3535 {
3536         /* TODO: implement later */
3537 }
3538
3539 static const struct amdgpu_display_funcs dm_display_funcs = {
3540         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3541         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3542         .backlight_set_level = NULL, /* never called for DC */
3543         .backlight_get_level = NULL, /* never called for DC */
3544         .hpd_sense = NULL,/* called unconditionally */
3545         .hpd_set_polarity = NULL, /* called unconditionally */
3546         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3547         .page_flip_get_scanoutpos =
3548                 dm_crtc_get_scanoutpos,/* called unconditionally */
3549         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3550         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3551 };
3552
3553 #if defined(CONFIG_DEBUG_KERNEL_DC)
3554
3555 static ssize_t s3_debug_store(struct device *device,
3556                               struct device_attribute *attr,
3557                               const char *buf,
3558                               size_t count)
3559 {
3560         int ret;
3561         int s3_state;
3562         struct drm_device *drm_dev = dev_get_drvdata(device);
3563         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3564
3565         ret = kstrtoint(buf, 0, &s3_state);
3566
3567         if (ret == 0) {
3568                 if (s3_state) {
3569                         dm_resume(adev);
3570                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3571                 } else {
3572                         dm_suspend(adev);
                }
3573         }
3574
3575         return ret == 0 ? count : ret;
3576 }
3577
3578 DEVICE_ATTR_WO(s3_debug);
3579
3580 #endif
3581
3582 static int dm_early_init(void *handle)
3583 {
3584         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3585
3586         switch (adev->asic_type) {
3587 #if defined(CONFIG_DRM_AMD_DC_SI)
3588         case CHIP_TAHITI:
3589         case CHIP_PITCAIRN:
3590         case CHIP_VERDE:
3591                 adev->mode_info.num_crtc = 6;
3592                 adev->mode_info.num_hpd = 6;
3593                 adev->mode_info.num_dig = 6;
3594                 break;
3595         case CHIP_OLAND:
3596                 adev->mode_info.num_crtc = 2;
3597                 adev->mode_info.num_hpd = 2;
3598                 adev->mode_info.num_dig = 2;
3599                 break;
3600 #endif
3601         case CHIP_BONAIRE:
3602         case CHIP_HAWAII:
3603                 adev->mode_info.num_crtc = 6;
3604                 adev->mode_info.num_hpd = 6;
3605                 adev->mode_info.num_dig = 6;
3606                 break;
3607         case CHIP_KAVERI:
3608                 adev->mode_info.num_crtc = 4;
3609                 adev->mode_info.num_hpd = 6;
3610                 adev->mode_info.num_dig = 7;
3611                 break;
3612         case CHIP_KABINI:
3613         case CHIP_MULLINS:
3614                 adev->mode_info.num_crtc = 2;
3615                 adev->mode_info.num_hpd = 6;
3616                 adev->mode_info.num_dig = 6;
3617                 break;
3618         case CHIP_FIJI:
3619         case CHIP_TONGA:
3620                 adev->mode_info.num_crtc = 6;
3621                 adev->mode_info.num_hpd = 6;
3622                 adev->mode_info.num_dig = 7;
3623                 break;
3624         case CHIP_CARRIZO:
3625                 adev->mode_info.num_crtc = 3;
3626                 adev->mode_info.num_hpd = 6;
3627                 adev->mode_info.num_dig = 9;
3628                 break;
3629         case CHIP_STONEY:
3630                 adev->mode_info.num_crtc = 2;
3631                 adev->mode_info.num_hpd = 6;
3632                 adev->mode_info.num_dig = 9;
3633                 break;
3634         case CHIP_POLARIS11:
3635         case CHIP_POLARIS12:
3636                 adev->mode_info.num_crtc = 5;
3637                 adev->mode_info.num_hpd = 5;
3638                 adev->mode_info.num_dig = 5;
3639                 break;
3640         case CHIP_POLARIS10:
3641         case CHIP_VEGAM:
3642                 adev->mode_info.num_crtc = 6;
3643                 adev->mode_info.num_hpd = 6;
3644                 adev->mode_info.num_dig = 6;
3645                 break;
3646         case CHIP_VEGA10:
3647         case CHIP_VEGA12:
3648         case CHIP_VEGA20:
3649                 adev->mode_info.num_crtc = 6;
3650                 adev->mode_info.num_hpd = 6;
3651                 adev->mode_info.num_dig = 6;
3652                 break;
3653 #if defined(CONFIG_DRM_AMD_DC_DCN)
3654         case CHIP_RAVEN:
3655         case CHIP_RENOIR:
3656         case CHIP_VANGOGH:
3657                 adev->mode_info.num_crtc = 4;
3658                 adev->mode_info.num_hpd = 4;
3659                 adev->mode_info.num_dig = 4;
3660                 break;
3661         case CHIP_NAVI10:
3662         case CHIP_NAVI12:
3663         case CHIP_SIENNA_CICHLID:
3664         case CHIP_NAVY_FLOUNDER:
3665                 adev->mode_info.num_crtc = 6;
3666                 adev->mode_info.num_hpd = 6;
3667                 adev->mode_info.num_dig = 6;
3668                 break;
3669         case CHIP_NAVI14:
3670         case CHIP_DIMGREY_CAVEFISH:
3671                 adev->mode_info.num_crtc = 5;
3672                 adev->mode_info.num_hpd = 5;
3673                 adev->mode_info.num_dig = 5;
3674                 break;
3675 #endif
3676         default:
3677                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3678                 return -EINVAL;
3679         }
3680
3681         amdgpu_dm_set_irq_funcs(adev);
3682
3683         if (adev->mode_info.funcs == NULL)
3684                 adev->mode_info.funcs = &dm_display_funcs;
3685
3686         /*
3687          * Note: Do NOT change adev->audio_endpt_rreg and
3688          * adev->audio_endpt_wreg because they are initialised in
3689          * amdgpu_device_init()
3690          */
3691 #if defined(CONFIG_DEBUG_KERNEL_DC)
3692         device_create_file(
3693                 adev_to_drm(adev)->dev,
3694                 &dev_attr_s3_debug);
3695 #endif
3696
3697         return 0;
3698 }
3699
3700 static bool modeset_required(struct drm_crtc_state *crtc_state,
3701                              struct dc_stream_state *new_stream,
3702                              struct dc_stream_state *old_stream)
3703 {
3704         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3705 }
3706
3707 static bool modereset_required(struct drm_crtc_state *crtc_state)
3708 {
3709         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3710 }
3711
3712 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3713 {
3714         drm_encoder_cleanup(encoder);
3715         kfree(encoder);
3716 }
3717
3718 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3719         .destroy = amdgpu_dm_encoder_destroy,
3720 };
3721
3722
3723 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3724                                 struct dc_scaling_info *scaling_info)
3725 {
3726         int scale_w, scale_h;
3727
3728         memset(scaling_info, 0, sizeof(*scaling_info));
3729
3730         /* Source is in 16.16 fixed point; ignore the fractional part for now... */
3731         scaling_info->src_rect.x = state->src_x >> 16;
3732         scaling_info->src_rect.y = state->src_y >> 16;
3733
3734         scaling_info->src_rect.width = state->src_w >> 16;
3735         if (scaling_info->src_rect.width == 0)
3736                 return -EINVAL;
3737
3738         scaling_info->src_rect.height = state->src_h >> 16;
3739         if (scaling_info->src_rect.height == 0)
3740                 return -EINVAL;
3741
3742         scaling_info->dst_rect.x = state->crtc_x;
3743         scaling_info->dst_rect.y = state->crtc_y;
3744
3745         if (state->crtc_w == 0)
3746                 return -EINVAL;
3747
3748         scaling_info->dst_rect.width = state->crtc_w;
3749
3750         if (state->crtc_h == 0)
3751                 return -EINVAL;
3752
3753         scaling_info->dst_rect.height = state->crtc_h;
3754
3755         /* DRM doesn't specify clipping on destination output. */
3756         scaling_info->clip_rect = scaling_info->dst_rect;
3757
3758         /* TODO: Validate scaling per-format with DC plane caps */
3759         scale_w = scaling_info->dst_rect.width * 1000 /
3760                   scaling_info->src_rect.width;
3761
3762         if (scale_w < 250 || scale_w > 16000)
3763                 return -EINVAL;
3764
3765         scale_h = scaling_info->dst_rect.height * 1000 /
3766                   scaling_info->src_rect.height;
3767
3768         if (scale_h < 250 || scale_h > 16000)
3769                 return -EINVAL;
3770
3771         /*
3772          * The "scaling_quality" can be ignored for now, quality = 0 has DC
3773          * assume reasonable defaults based on the format.
3774          */
3775
3776         return 0;
3777 }
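
/*
 * Illustrative numbers for the scale checks above: a 1920x1080 source
 * (src_w = 1920 << 16 in DRM's 16.16 fixed point) scaled to a 960x540
 * destination gives scale_w = 960 * 1000 / 1920 = 500 and
 * scale_h = 540 * 1000 / 1080 = 500, i.e. a 0.5x downscale, well inside
 * the permitted 0.25x (250) to 16x (16000) window.
 */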
3778
3779 static void
3780 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3781                                  uint64_t tiling_flags)
3782 {
3783         /* Fill GFX8 params */
3784         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3785                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3786
3787                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3788                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3789                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3790                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3791                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3792
3793                 /* XXX fix me for VI */
3794                 tiling_info->gfx8.num_banks = num_banks;
3795                 tiling_info->gfx8.array_mode =
3796                                 DC_ARRAY_2D_TILED_THIN1;
3797                 tiling_info->gfx8.tile_split = tile_split;
3798                 tiling_info->gfx8.bank_width = bankw;
3799                 tiling_info->gfx8.bank_height = bankh;
3800                 tiling_info->gfx8.tile_aspect = mtaspect;
3801                 tiling_info->gfx8.tile_mode =
3802                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3803         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3804                         == DC_ARRAY_1D_TILED_THIN1) {
3805                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3806         }
3807
3808         tiling_info->gfx8.pipe_config =
3809                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3810 }
3811
3812 static void
3813 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3814                                   union dc_tiling_info *tiling_info)
3815 {
3816         tiling_info->gfx9.num_pipes =
3817                 adev->gfx.config.gb_addr_config_fields.num_pipes;
3818         tiling_info->gfx9.num_banks =
3819                 adev->gfx.config.gb_addr_config_fields.num_banks;
3820         tiling_info->gfx9.pipe_interleave =
3821                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3822         tiling_info->gfx9.num_shader_engines =
3823                 adev->gfx.config.gb_addr_config_fields.num_se;
3824         tiling_info->gfx9.max_compressed_frags =
3825                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3826         tiling_info->gfx9.num_rb_per_se =
3827                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3828         tiling_info->gfx9.shaderEnable = 1;
3829         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3830             adev->asic_type == CHIP_NAVY_FLOUNDER ||
3831             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3832             adev->asic_type == CHIP_VANGOGH)
3833                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3834 }
3835
3836 static int
3837 validate_dcc(struct amdgpu_device *adev,
3838              const enum surface_pixel_format format,
3839              const enum dc_rotation_angle rotation,
3840              const union dc_tiling_info *tiling_info,
3841              const struct dc_plane_dcc_param *dcc,
3842              const struct dc_plane_address *address,
3843              const struct plane_size *plane_size)
3844 {
3845         struct dc *dc = adev->dm.dc;
3846         struct dc_dcc_surface_param input;
3847         struct dc_surface_dcc_cap output;
3848
3849         memset(&input, 0, sizeof(input));
3850         memset(&output, 0, sizeof(output));
3851
3852         if (!dcc->enable)
3853                 return 0;
3854
3855         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3856             !dc->cap_funcs.get_dcc_compression_cap)
3857                 return -EINVAL;
3858
3859         input.format = format;
3860         input.surface_size.width = plane_size->surface_size.width;
3861         input.surface_size.height = plane_size->surface_size.height;
3862         input.swizzle_mode = tiling_info->gfx9.swizzle;
3863
3864         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3865                 input.scan = SCAN_DIRECTION_HORIZONTAL;
3866         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3867                 input.scan = SCAN_DIRECTION_VERTICAL;
3868
3869         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3870                 return -EINVAL;
3871
3872         if (!output.capable)
3873                 return -EINVAL;
3874
3875         if (dcc->independent_64b_blks == 0 &&
3876             output.grph.rgb.independent_64b_blks != 0)
3877                 return -EINVAL;
3878
3879         return 0;
3880 }
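
/*
 * Note on the final check in validate_dcc(): if the framebuffer's DCC
 * layout does not use independent 64B blocks (dcc->independent_64b_blks
 * == 0) but the reported capability requires them, the combination is
 * rejected; everything else is delegated to DC's
 * get_dcc_compression_cap() callback.
 */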
3881
3882 static bool
3883 modifier_has_dcc(uint64_t modifier)
3884 {
3885         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3886 }
3887
3888 static unsigned
3889 modifier_gfx9_swizzle_mode(uint64_t modifier)
3890 {
3891         if (modifier == DRM_FORMAT_MOD_LINEAR)
3892                 return 0;
3893
3894         return AMD_FMT_MOD_GET(TILE, modifier);
3895 }
3896
3897 static const struct drm_format_info *
3898 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3899 {
3900         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
3901 }
3902
3903 static void
3904 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3905                                     union dc_tiling_info *tiling_info,
3906                                     uint64_t modifier)
3907 {
3908         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3909         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3910         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3911         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3912
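        /*
         * The min() above caps num_pipes at 2^4 = 16; any remaining pipe
         * XOR bits are expressed as shader engines below. Illustrative
         * arithmetic: mod_pipe_xor_bits = 6 gives 1 << 4 = 16 pipes and
         * 1 << (6 - 4) = 4 shader engines.
         */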
3913         fill_gfx9_tiling_info_from_device(adev, tiling_info);
3914
3915         if (!IS_AMD_FMT_MOD(modifier))
3916                 return;
3917
3918         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3919         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
3920
3921         if (adev->family >= AMDGPU_FAMILY_NV) {
3922                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
3923         } else {
3924                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3925
3926                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
3927         }
3928 }
3929
3930 enum dm_micro_swizzle {
3931         MICRO_SWIZZLE_Z = 0,
3932         MICRO_SWIZZLE_S = 1,
3933         MICRO_SWIZZLE_D = 2,
3934         MICRO_SWIZZLE_R = 3
3935 };
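
/*
 * The low two bits of a gfx9+ swizzle mode encode the micro tile order,
 * which is why dm_plane_format_mod_supported() below masks the swizzle
 * mode with 3. For instance (constant values quoted from drm_fourcc.h
 * for illustration), AMD_FMT_MOD_TILE_GFX9_64K_S_X & 3 == MICRO_SWIZZLE_S
 * and AMD_FMT_MOD_TILE_GFX9_64K_R_X & 3 == MICRO_SWIZZLE_R.
 */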
3936
3937 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
3938                                           uint32_t format,
3939                                           uint64_t modifier)
3940 {
3941         struct amdgpu_device *adev = drm_to_adev(plane->dev);
3942         const struct drm_format_info *info = drm_format_info(format);
3943
3944         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
3945
3946         if (!info)
3947                 return false;
3948
3949         /*
3950          * We always have to allow this modifier, because core DRM still
3951          * checks LINEAR support if userspace does not provide modifiers.
3952          */
3953         if (modifier == DRM_FORMAT_MOD_LINEAR)
3954                 return true;
3955
3956         /*
3957          * The arbitrary tiling support for multiplane formats has not been hooked
3958          * up.
3959          */
3960         if (info->num_planes > 1)
3961                 return false;
3962
3963         /*
3964          * For D swizzle the canonical modifier depends on the bpp, so check
3965          * it here.
3966          */
3967         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
3968             adev->family >= AMDGPU_FAMILY_NV) {
3969                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
3970                         return false;
3971         }
3972
3973         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
3974             info->cpp[0] < 8)
3975                 return false;
3976
3977         if (modifier_has_dcc(modifier)) {
3978                 /* Per radeonsi comments, 16/64 bpp are more complicated. */
3979                 if (info->cpp[0] != 4)
3980                         return false;
3981         }
3982
3983         return true;
3984 }
3985
3986 static void
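/*
 * Append one modifier to a dynamically grown array, doubling the capacity
 * when it fills up. On allocation failure the array is freed and *mods is
 * set to NULL; the guard at the top then turns every later call into a
 * no-op, so a caller can defer its single NULL check until after all
 * additions.
 */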
3987 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
3988 {
3989         if (!*mods)
3990                 return;
3991
3992         if (*cap - *size < 1) {
3993                 uint64_t new_cap = *cap * 2;
3994                 uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
3995
3996                 if (!new_mods) {
3997                         kfree(*mods);
3998                         *mods = NULL;
3999                         return;
4000                 }
4001
4002                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4003                 kfree(*mods);
4004                 *mods = new_mods;
4005                 *cap = new_cap;
4006         }
4007
4008         (*mods)[*size] = mod;
4009         *size += 1;
4010 }
4011
4012 static void
4013 add_gfx9_modifiers(const struct amdgpu_device *adev,
4014                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4015 {
4016         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4017         int pipe_xor_bits = min(8, pipes +
4018                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4019         int bank_xor_bits = min(8 - pipe_xor_bits,
4020                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4021         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4022                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4023
4025         if (adev->family == AMDGPU_FAMILY_RV) {
4026                 /* Raven2 and later */
4027                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4028
4029                 /*
4030                  * No _D DCC swizzles yet because we only allow 32bpp, which
4031                  * doesn't support _D on DCN
4032                  */
4033
4034                 if (has_constant_encode) {
4035                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4036                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4037                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4038                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4039                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4040                                     AMD_FMT_MOD_SET(DCC, 1) |
4041                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4042                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4043                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4044                 }
4045
4046                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4047                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4048                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4049                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4050                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4051                             AMD_FMT_MOD_SET(DCC, 1) |
4052                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4053                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4054                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4055
4056                 if (has_constant_encode) {
4057                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4058                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4059                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4060                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4061                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4062                                     AMD_FMT_MOD_SET(DCC, 1) |
4063                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4064                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4065                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4067                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4068                                     AMD_FMT_MOD_SET(RB, rb) |
4069                                     AMD_FMT_MOD_SET(PIPE, pipes));
4070                 }
4071
4072                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4073                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4074                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4075                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4076                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4077                             AMD_FMT_MOD_SET(DCC, 1) |
4078                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4079                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4080                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4081                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4082                             AMD_FMT_MOD_SET(RB, rb) |
4083                             AMD_FMT_MOD_SET(PIPE, pipes));
4084         }
4085
4086         /*
4087          * Only supported for 64bpp on Raven, will be filtered on format in
4088          * dm_plane_format_mod_supported.
4089          */
4090         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4091                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4092                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4093                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4094                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4095
4096         if (adev->family == AMDGPU_FAMILY_RV) {
4097                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4098                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4099                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4100                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4101                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4102         }
4103
4104         /*
4105          * Only supported for 64bpp on Raven, will be filtered on format in
4106          * dm_plane_format_mod_supported.
4107          */
4108         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4109                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4110                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4111
4112         if (adev->family == AMDGPU_FAMILY_RV) {
4113                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4114                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4115                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4116         }
4117 }
4118
4119 static void
4120 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4121                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4122 {
4123         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4124
4125         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4126                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4127                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4128                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4129                     AMD_FMT_MOD_SET(DCC, 1) |
4130                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4131                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4132                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4133
4134         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4135                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4136                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4137                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4138                     AMD_FMT_MOD_SET(DCC, 1) |
4139                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4140                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4141                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4142                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4143
4144         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4145                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4146                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4147                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4148
4149         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4150                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4151                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4152                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4153
4155         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4156         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4157                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4158                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4159
4160         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4161                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4162                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4163 }
4164
4165 static void
4166 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4167                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4168 {
4169         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4170         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4171
4172         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4173                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4174                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4175                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4176                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4177                     AMD_FMT_MOD_SET(DCC, 1) |
4178                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4179                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4180                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4181                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4182
4183         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4184                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4185                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4186                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4187                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4188                     AMD_FMT_MOD_SET(DCC, 1) |
4189                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4190                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4191                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4192                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4193                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4194
4195         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4196                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4197                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4198                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4199                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4200
4201         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4202                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4203                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4204                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4205                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4206
4207         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4208         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4209                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4210                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4211
4212         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4213                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4214                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4215 }
4216
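/*
 * Build the format modifier list advertised for a plane. The list is
 * heap-allocated, grown on demand by add_modifier() (defined earlier in
 * this file) and terminated by DRM_FORMAT_MOD_INVALID. add_modifier()
 * is expected to become a no-op once an allocation has failed, which
 * the final !*mods check below turns into -ENOMEM. Cursor planes are
 * only ever given LINEAR here.
 */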
4217 static int
4218 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4219 {
4220         uint64_t size = 0, capacity = 128;
4221         *mods = NULL;
4222
4223         /* We have not hooked up any pre-GFX9 modifiers. */
4224         if (adev->family < AMDGPU_FAMILY_AI)
4225                 return 0;
4226
4227         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4228
4229         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4230                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4231                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4232                 return *mods ? 0 : -ENOMEM;
4233         }
4234
4235         switch (adev->family) {
4236         case AMDGPU_FAMILY_AI:
4237         case AMDGPU_FAMILY_RV:
4238                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4239                 break;
4240         case AMDGPU_FAMILY_NV:
4241         case AMDGPU_FAMILY_VGH:
4242                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4243                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4244                 else
4245                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4246                 break;
4247         }
4248
4249         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4250
4251         /* INVALID marks the end of the list. */
4252         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4253
4254         if (!*mods)
4255                 return -ENOMEM;
4256
4257         return 0;
4258 }
4259
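/*
 * On GFX9+ the tiling layout comes entirely from the DRM format
 * modifier. When the modifier carries DCC, the framebuffer layout
 * convention is that plane 1 holds the DCC metadata, so offsets[1] /
 * pitches[1] below describe the meta surface.
 */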
4260 static int
4261 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4262                                           const struct amdgpu_framebuffer *afb,
4263                                           const enum surface_pixel_format format,
4264                                           const enum dc_rotation_angle rotation,
4265                                           const struct plane_size *plane_size,
4266                                           union dc_tiling_info *tiling_info,
4267                                           struct dc_plane_dcc_param *dcc,
4268                                           struct dc_plane_address *address,
4269                                           const bool force_disable_dcc)
4270 {
4271         const uint64_t modifier = afb->base.modifier;
4272         int ret;
4273
4274         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4275         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4276
4277         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4278                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4279
4280                 dcc->enable = 1;
4281                 dcc->meta_pitch = afb->base.pitches[1];
4282                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4283
4284                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4285                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4286         }
4287
4288         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4289         if (ret)
4290                 return ret;
4291
4292         return 0;
4293 }
4294
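/*
 * Translate the framebuffer layout into DC's buffer description. RGB
 * surfaces use a single GRAPHICS address; semi-planar YUV (NV12/NV21/
 * P010) is split into a luma address (plane 0) and a chroma address
 * (plane 1), with the chroma size hardcoded to 2x2 subsampling until
 * the TODO below derives it from the surface format.
 */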
4295 static int
4296 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4297                              const struct amdgpu_framebuffer *afb,
4298                              const enum surface_pixel_format format,
4299                              const enum dc_rotation_angle rotation,
4300                              const uint64_t tiling_flags,
4301                              union dc_tiling_info *tiling_info,
4302                              struct plane_size *plane_size,
4303                              struct dc_plane_dcc_param *dcc,
4304                              struct dc_plane_address *address,
4305                              bool tmz_surface,
4306                              bool force_disable_dcc)
4307 {
4308         const struct drm_framebuffer *fb = &afb->base;
4309         int ret;
4310
4311         memset(tiling_info, 0, sizeof(*tiling_info));
4312         memset(plane_size, 0, sizeof(*plane_size));
4313         memset(dcc, 0, sizeof(*dcc));
4314         memset(address, 0, sizeof(*address));
4315
4316         address->tmz_surface = tmz_surface;
4317
4318         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4319                 uint64_t addr = afb->address + fb->offsets[0];
4320
4321                 plane_size->surface_size.x = 0;
4322                 plane_size->surface_size.y = 0;
4323                 plane_size->surface_size.width = fb->width;
4324                 plane_size->surface_size.height = fb->height;
4325                 plane_size->surface_pitch =
4326                         fb->pitches[0] / fb->format->cpp[0];
4327
4328                 address->type = PLN_ADDR_TYPE_GRAPHICS;
4329                 address->grph.addr.low_part = lower_32_bits(addr);
4330                 address->grph.addr.high_part = upper_32_bits(addr);
4331         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4332                 uint64_t luma_addr = afb->address + fb->offsets[0];
4333                 uint64_t chroma_addr = afb->address + fb->offsets[1];
4334
4335                 plane_size->surface_size.x = 0;
4336                 plane_size->surface_size.y = 0;
4337                 plane_size->surface_size.width = fb->width;
4338                 plane_size->surface_size.height = fb->height;
4339                 plane_size->surface_pitch =
4340                         fb->pitches[0] / fb->format->cpp[0];
4341
4342                 plane_size->chroma_size.x = 0;
4343                 plane_size->chroma_size.y = 0;
4344                 /* TODO: set these based on surface format */
4345                 plane_size->chroma_size.width = fb->width / 2;
4346                 plane_size->chroma_size.height = fb->height / 2;
4347
4348                 plane_size->chroma_pitch =
4349                         fb->pitches[1] / fb->format->cpp[1];
4350
4351                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4352                 address->video_progressive.luma_addr.low_part =
4353                         lower_32_bits(luma_addr);
4354                 address->video_progressive.luma_addr.high_part =
4355                         upper_32_bits(luma_addr);
4356                 address->video_progressive.chroma_addr.low_part =
4357                         lower_32_bits(chroma_addr);
4358                 address->video_progressive.chroma_addr.high_part =
4359                         upper_32_bits(chroma_addr);
4360         }
4361
4362         if (adev->family >= AMDGPU_FAMILY_AI) {
4363                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4364                                                                 rotation, plane_size,
4365                                                                 tiling_info, dcc,
4366                                                                 address,
4367                                                                 force_disable_dcc);
4368                 if (ret)
4369                         return ret;
4370         } else {
4371                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4372         }
4373
4374         return 0;
4375 }
4376
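/*
 * Derive DC blending state from the DRM plane state. Only overlay
 * planes blend: per-pixel alpha requires both the PREMULTI blend mode
 * and an alpha-capable format, and the 16-bit DRM plane alpha (0xffff
 * == opaque) is scaled to DC's 8-bit global alpha via >> 8, e.g.
 * 0x8080 -> 0x80.
 */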
4377 static void
4378 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4379                                bool *per_pixel_alpha, bool *global_alpha,
4380                                int *global_alpha_value)
4381 {
4382         *per_pixel_alpha = false;
4383         *global_alpha = false;
4384         *global_alpha_value = 0xff;
4385
4386         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4387                 return;
4388
4389         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4390                 static const uint32_t alpha_formats[] = {
4391                         DRM_FORMAT_ARGB8888,
4392                         DRM_FORMAT_RGBA8888,
4393                         DRM_FORMAT_ABGR8888,
4394                 };
4395                 uint32_t format = plane_state->fb->format->format;
4396                 unsigned int i;
4397
4398                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4399                         if (format == alpha_formats[i]) {
4400                                 *per_pixel_alpha = true;
4401                                 break;
4402                         }
4403                 }
4404         }
4405
4406         if (plane_state->alpha < 0xffff) {
4407                 *global_alpha = true;
4408                 *global_alpha_value = plane_state->alpha >> 8;
4409         }
4410 }
4411
4412 static int
4413 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4414                             const enum surface_pixel_format format,
4415                             enum dc_color_space *color_space)
4416 {
4417         bool full_range;
4418
4419         *color_space = COLOR_SPACE_SRGB;
4420
4421         /* DRM color properties only affect non-RGB formats. */
4422         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4423                 return 0;
4424
4425         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4426
4427         switch (plane_state->color_encoding) {
4428         case DRM_COLOR_YCBCR_BT601:
4429                 if (full_range)
4430                         *color_space = COLOR_SPACE_YCBCR601;
4431                 else
4432                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4433                 break;
4434
4435         case DRM_COLOR_YCBCR_BT709:
4436                 if (full_range)
4437                         *color_space = COLOR_SPACE_YCBCR709;
4438                 else
4439                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4440                 break;
4441
4442         case DRM_COLOR_YCBCR_BT2020:
4443                 if (full_range)
4444                         *color_space = COLOR_SPACE_2020_YCBCR;
4445                 else
4446                         return -EINVAL;
4447                 break;
4448
4449         default:
4450                 return -EINVAL;
4451         }
4452
4453         return 0;
4454 }
4455
4456 static int
4457 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4458                             const struct drm_plane_state *plane_state,
4459                             const uint64_t tiling_flags,
4460                             struct dc_plane_info *plane_info,
4461                             struct dc_plane_address *address,
4462                             bool tmz_surface,
4463                             bool force_disable_dcc)
4464 {
4465         const struct drm_framebuffer *fb = plane_state->fb;
4466         const struct amdgpu_framebuffer *afb =
4467                 to_amdgpu_framebuffer(plane_state->fb);
4468         struct drm_format_name_buf format_name;
4469         int ret;
4470
4471         memset(plane_info, 0, sizeof(*plane_info));
4472
4473         switch (fb->format->format) {
4474         case DRM_FORMAT_C8:
4475                 plane_info->format =
4476                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4477                 break;
4478         case DRM_FORMAT_RGB565:
4479                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4480                 break;
4481         case DRM_FORMAT_XRGB8888:
4482         case DRM_FORMAT_ARGB8888:
4483                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4484                 break;
4485         case DRM_FORMAT_XRGB2101010:
4486         case DRM_FORMAT_ARGB2101010:
4487                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4488                 break;
4489         case DRM_FORMAT_XBGR2101010:
4490         case DRM_FORMAT_ABGR2101010:
4491                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4492                 break;
4493         case DRM_FORMAT_XBGR8888:
4494         case DRM_FORMAT_ABGR8888:
4495                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4496                 break;
4497         case DRM_FORMAT_NV21:
4498                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4499                 break;
4500         case DRM_FORMAT_NV12:
4501                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4502                 break;
4503         case DRM_FORMAT_P010:
4504                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4505                 break;
4506         case DRM_FORMAT_XRGB16161616F:
4507         case DRM_FORMAT_ARGB16161616F:
4508                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4509                 break;
4510         case DRM_FORMAT_XBGR16161616F:
4511         case DRM_FORMAT_ABGR16161616F:
4512                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4513                 break;
4514         default:
4515                 DRM_ERROR(
4516                         "Unsupported screen format %s\n",
4517                         drm_get_format_name(fb->format->format, &format_name));
4518                 return -EINVAL;
4519         }
4520
4521         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4522         case DRM_MODE_ROTATE_0:
4523                 plane_info->rotation = ROTATION_ANGLE_0;
4524                 break;
4525         case DRM_MODE_ROTATE_90:
4526                 plane_info->rotation = ROTATION_ANGLE_90;
4527                 break;
4528         case DRM_MODE_ROTATE_180:
4529                 plane_info->rotation = ROTATION_ANGLE_180;
4530                 break;
4531         case DRM_MODE_ROTATE_270:
4532                 plane_info->rotation = ROTATION_ANGLE_270;
4533                 break;
4534         default:
4535                 plane_info->rotation = ROTATION_ANGLE_0;
4536                 break;
4537         }
4538
4539         plane_info->visible = true;
4540         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4541
4542         plane_info->layer_index = 0;
4543
4544         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4545                                           &plane_info->color_space);
4546         if (ret)
4547                 return ret;
4548
4549         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4550                                            plane_info->rotation, tiling_flags,
4551                                            &plane_info->tiling_info,
4552                                            &plane_info->plane_size,
4553                                            &plane_info->dcc, address, tmz_surface,
4554                                            force_disable_dcc);
4555         if (ret)
4556                 return ret;
4557
4558         fill_blending_from_plane_state(
4559                 plane_state, &plane_info->per_pixel_alpha,
4560                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4561
4562         return 0;
4563 }
4564
4565 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4566                                     struct dc_plane_state *dc_plane_state,
4567                                     struct drm_plane_state *plane_state,
4568                                     struct drm_crtc_state *crtc_state)
4569 {
4570         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4571         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4572         struct dc_scaling_info scaling_info;
4573         struct dc_plane_info plane_info;
4574         int ret;
4575         bool force_disable_dcc = false;
4576
4577         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4578         if (ret)
4579                 return ret;
4580
4581         dc_plane_state->src_rect = scaling_info.src_rect;
4582         dc_plane_state->dst_rect = scaling_info.dst_rect;
4583         dc_plane_state->clip_rect = scaling_info.clip_rect;
4584         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4585
4586         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4587         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4588                                           afb->tiling_flags,
4589                                           &plane_info,
4590                                           &dc_plane_state->address,
4591                                           afb->tmz_surface,
4592                                           force_disable_dcc);
4593         if (ret)
4594                 return ret;
4595
4596         dc_plane_state->format = plane_info.format;
4597         dc_plane_state->color_space = plane_info.color_space;
4599         dc_plane_state->plane_size = plane_info.plane_size;
4600         dc_plane_state->rotation = plane_info.rotation;
4601         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4602         dc_plane_state->stereo_format = plane_info.stereo_format;
4603         dc_plane_state->tiling_info = plane_info.tiling_info;
4604         dc_plane_state->visible = plane_info.visible;
4605         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4606         dc_plane_state->global_alpha = plane_info.global_alpha;
4607         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4608         dc_plane_state->dcc = plane_info.dcc;
4609         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
4610
4611         /*
4612          * Always set input transfer function, since plane state is refreshed
4613          * every time.
4614          */
4615         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4616         if (ret)
4617                 return ret;
4618
4619         return 0;
4620 }
4621
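/*
 * Map the source mode into the stream's addressable area. For
 * RMX_ASPECT (and RMX_OFF) the smaller scale factor wins, letter- or
 * pillarboxing the image: e.g. a 1920x1080 mode on a 2560x1600 stream
 * yields a 2560x1440 destination centered at y = 80. Underscan borders
 * then shrink the destination rect further.
 */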
4622 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4623                                            const struct dm_connector_state *dm_state,
4624                                            struct dc_stream_state *stream)
4625 {
4626         enum amdgpu_rmx_type rmx_type;
4627
4628         struct rect src = { 0 }; /* viewport in composition space */
4629         struct rect dst = { 0 }; /* stream addressable area */
4630
4631         /* no mode. nothing to be done */
4632         if (!mode)
4633                 return;
4634
4635         /* Full screen scaling by default */
4636         src.width = mode->hdisplay;
4637         src.height = mode->vdisplay;
4638         dst.width = stream->timing.h_addressable;
4639         dst.height = stream->timing.v_addressable;
4640
4641         if (dm_state) {
4642                 rmx_type = dm_state->scaling;
4643                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4644                         if (src.width * dst.height <
4645                                         src.height * dst.width) {
4646                                 /* height needs less upscaling/more downscaling */
4647                                 dst.width = src.width *
4648                                                 dst.height / src.height;
4649                         } else {
4650                                 /* width needs less upscaling/more downscaling */
4651                                 dst.height = src.height *
4652                                                 dst.width / src.width;
4653                         }
4654                 } else if (rmx_type == RMX_CENTER) {
4655                         dst = src;
4656                 }
4657
4658                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4659                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4660
4661                 if (dm_state->underscan_enable) {
4662                         dst.x += dm_state->underscan_hborder / 2;
4663                         dst.y += dm_state->underscan_vborder / 2;
4664                         dst.width -= dm_state->underscan_hborder;
4665                         dst.height -= dm_state->underscan_vborder;
4666                 }
4667         }
4668
4669         stream->src = src;
4670         stream->dst = dst;
4671
4672         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4673                         dst.x, dst.y, dst.width, dst.height);
4674
4675 }
4676
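/*
 * Choose the stream color depth: start from the sink's EDID bpc (or
 * the HDMI YCbCr420 deep color caps for 4:2:0 output), clamp it to the
 * user-requested max bpc when one is given, and round odd values down
 * to even since the supported depths are all even.
 */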
4677 static enum dc_color_depth
4678 convert_color_depth_from_display_info(const struct drm_connector *connector,
4679                                       bool is_y420, int requested_bpc)
4680 {
4681         uint8_t bpc;
4682
4683         if (is_y420) {
4684                 bpc = 8;
4685
4686                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4687                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4688                         bpc = 16;
4689                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4690                         bpc = 12;
4691                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4692                         bpc = 10;
4693         } else {
4694                 bpc = (uint8_t)connector->display_info.bpc;
4695                 /* Assume 8 bpc by default if no bpc is specified. */
4696                 bpc = bpc ? bpc : 8;
4697         }
4698
4699         if (requested_bpc > 0) {
4700                 /*
4701                  * Cap display bpc based on the user requested value.
4702                  *
4703                  * The value for state->max_bpc may not be correctly updated
4704                  * depending on when the connector gets added to the state
4705                  * or if this was called outside of atomic check, so it
4706                  * can't be used directly.
4707                  */
4708                 bpc = min_t(u8, bpc, requested_bpc);
4709
4710                 /* Round down to the nearest even number. */
4711                 bpc = bpc - (bpc & 1);
4712         }
4713
4714         switch (bpc) {
4715         case 0:
4716                 /*
4717                  * Temporary Work around, DRM doesn't parse color depth for
4718                  * EDID revision before 1.4
4719                  * TODO: Fix edid parsing
4720                  */
4721                 return COLOR_DEPTH_888;
4722         case 6:
4723                 return COLOR_DEPTH_666;
4724         case 8:
4725                 return COLOR_DEPTH_888;
4726         case 10:
4727                 return COLOR_DEPTH_101010;
4728         case 12:
4729                 return COLOR_DEPTH_121212;
4730         case 14:
4731                 return COLOR_DEPTH_141414;
4732         case 16:
4733                 return COLOR_DEPTH_161616;
4734         default:
4735                 return COLOR_DEPTH_UNDEFINED;
4736         }
4737 }
4738
4739 static enum dc_aspect_ratio
4740 get_aspect_ratio(const struct drm_display_mode *mode_in)
4741 {
4742         /* 1-1 mapping, since both enums follow the HDMI spec. */
4743         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4744 }
4745
4746 static enum dc_color_space
4747 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4748 {
4749         enum dc_color_space color_space = COLOR_SPACE_SRGB;
4750
4751         switch (dc_crtc_timing->pixel_encoding) {
4752         case PIXEL_ENCODING_YCBCR422:
4753         case PIXEL_ENCODING_YCBCR444:
4754         case PIXEL_ENCODING_YCBCR420:
4755         {
4756                 /*
4757                  * 27.03 MHz (pix_clk_100hz == 270300) is the separation point
4758                  * between HDTV and SDTV per the HDMI spec: YCbCr709 above,
4759                  * YCbCr601 below.
4760                  */
4761                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4762                         if (dc_crtc_timing->flags.Y_ONLY)
4763                                 color_space =
4764                                         COLOR_SPACE_YCBCR709_LIMITED;
4765                         else
4766                                 color_space = COLOR_SPACE_YCBCR709;
4767                 } else {
4768                         if (dc_crtc_timing->flags.Y_ONLY)
4769                                 color_space =
4770                                         COLOR_SPACE_YCBCR601_LIMITED;
4771                         else
4772                                 color_space = COLOR_SPACE_YCBCR601;
4773                 }
4774
4775         }
4776         break;
4777         case PIXEL_ENCODING_RGB:
4778                 color_space = COLOR_SPACE_SRGB;
4779                 break;
4780
4781         default:
4782                 WARN_ON(1);
4783                 break;
4784         }
4785
4786         return color_space;
4787 }
4788
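/*
 * Step the color depth down until the HDMI TMDS character rate fits.
 * normalized_clk and info->max_tmds_clock are both in kHz; the pixel
 * clock is scaled by bpp/24 (30/24 for 10-bit, 36/24 for 12-bit, ...)
 * after halving for YCbCr 4:2:0. E.g. 594000 kHz at 12-bit needs
 * 594000 * 36 / 24 = 891000 kHz, so a 600000 kHz TMDS limit forces it
 * down through 10-bit (742500 kHz) to 8-bit (594000 kHz).
 */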
4789 static bool adjust_colour_depth_from_display_info(
4790         struct dc_crtc_timing *timing_out,
4791         const struct drm_display_info *info)
4792 {
4793         enum dc_color_depth depth = timing_out->display_color_depth;
4794         int normalized_clk;
4795         do {
4796                 normalized_clk = timing_out->pix_clk_100hz / 10;
4797                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4798                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4799                         normalized_clk /= 2;
4800                 /* Adjusting pix clock following on HDMI spec based on colour depth */
4801                 switch (depth) {
4802                 case COLOR_DEPTH_888:
4803                         break;
4804                 case COLOR_DEPTH_101010:
4805                         normalized_clk = (normalized_clk * 30) / 24;
4806                         break;
4807                 case COLOR_DEPTH_121212:
4808                         normalized_clk = (normalized_clk * 36) / 24;
4809                         break;
4810                 case COLOR_DEPTH_161616:
4811                         normalized_clk = (normalized_clk * 48) / 24;
4812                         break;
4813                 default:
4814                         /* The above depths are the only ones valid for HDMI. */
4815                         return false;
4816                 }
4817                 if (normalized_clk <= info->max_tmds_clock) {
4818                         timing_out->display_color_depth = depth;
4819                         return true;
4820                 }
4821         } while (--depth > COLOR_DEPTH_666);
4822         return false;
4823 }
4824
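/*
 * Convert DRM crtc_* timings into DC's porch/sync representation:
 * front porch = crtc_*sync_start - crtc_*display, sync width =
 * crtc_*sync_end - crtc_*sync_start, and pix_clk_100hz =
 * crtc_clock (kHz) * 10. Pixel encoding is forced to YCbCr420 for
 * 420-only HDMI modes and falls back to RGB when nothing better
 * applies.
 */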
4825 static void fill_stream_properties_from_drm_display_mode(
4826         struct dc_stream_state *stream,
4827         const struct drm_display_mode *mode_in,
4828         const struct drm_connector *connector,
4829         const struct drm_connector_state *connector_state,
4830         const struct dc_stream_state *old_stream,
4831         int requested_bpc)
4832 {
4833         struct dc_crtc_timing *timing_out = &stream->timing;
4834         const struct drm_display_info *info = &connector->display_info;
4835         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4836         struct hdmi_vendor_infoframe hv_frame;
4837         struct hdmi_avi_infoframe avi_frame;
4838
4839         memset(&hv_frame, 0, sizeof(hv_frame));
4840         memset(&avi_frame, 0, sizeof(avi_frame));
4841
4842         timing_out->h_border_left = 0;
4843         timing_out->h_border_right = 0;
4844         timing_out->v_border_top = 0;
4845         timing_out->v_border_bottom = 0;
4846         /* TODO: un-hardcode */
4847         if (drm_mode_is_420_only(info, mode_in)
4848                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4849                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4850         else if (drm_mode_is_420_also(info, mode_in)
4851                         && aconnector->force_yuv420_output)
4852                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4853         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4854                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4855                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4856         else
4857                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4858
4859         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4860         timing_out->display_color_depth = convert_color_depth_from_display_info(
4861                 connector,
4862                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4863                 requested_bpc);
4864         timing_out->scan_type = SCANNING_TYPE_NODATA;
4865         timing_out->hdmi_vic = 0;
4866
4867         if (old_stream) {
4868                 timing_out->vic = old_stream->timing.vic;
4869                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4870                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4871         } else {
4872                 timing_out->vic = drm_match_cea_mode(mode_in);
4873                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4874                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4875                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4876                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4877         }
4878
4879         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4880                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4881                 timing_out->vic = avi_frame.video_code;
4882                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4883                 timing_out->hdmi_vic = hv_frame.vic;
4884         }
4885
4886         timing_out->h_addressable = mode_in->crtc_hdisplay;
4887         timing_out->h_total = mode_in->crtc_htotal;
4888         timing_out->h_sync_width =
4889                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4890         timing_out->h_front_porch =
4891                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4892         timing_out->v_total = mode_in->crtc_vtotal;
4893         timing_out->v_addressable = mode_in->crtc_vdisplay;
4894         timing_out->v_front_porch =
4895                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4896         timing_out->v_sync_width =
4897                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4898         timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4899         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4900
4901         stream->output_color_space = get_output_color_space(timing_out);
4902
4903         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4904         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4905         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4906                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4907                     drm_mode_is_420_also(info, mode_in) &&
4908                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4909                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4910                         adjust_colour_depth_from_display_info(timing_out, info);
4911                 }
4912         }
4913 }
4914
4915 static void fill_audio_info(struct audio_info *audio_info,
4916                             const struct drm_connector *drm_connector,
4917                             const struct dc_sink *dc_sink)
4918 {
4919         int i = 0;
4920         int cea_revision = 0;
4921         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4922
4923         audio_info->manufacture_id = edid_caps->manufacturer_id;
4924         audio_info->product_id = edid_caps->product_id;
4925
4926         cea_revision = drm_connector->display_info.cea_rev;
4927
4928         strscpy(audio_info->display_name,
4929                 edid_caps->display_name,
4930                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4931
4932         if (cea_revision >= 3) {
4933                 audio_info->mode_count = edid_caps->audio_mode_count;
4934
4935                 for (i = 0; i < audio_info->mode_count; ++i) {
4936                         audio_info->modes[i].format_code =
4937                                         (enum audio_format_code)
4938                                         (edid_caps->audio_modes[i].format_code);
4939                         audio_info->modes[i].channel_count =
4940                                         edid_caps->audio_modes[i].channel_count;
4941                         audio_info->modes[i].sample_rates.all =
4942                                         edid_caps->audio_modes[i].sample_rate;
4943                         audio_info->modes[i].sample_size =
4944                                         edid_caps->audio_modes[i].sample_size;
4945                 }
4946         }
4947
4948         audio_info->flags.all = edid_caps->speaker_flags;
4949
4950         /* TODO: only progressive-mode latency is checked; check interlaced latency too */
4951         if (drm_connector->latency_present[0]) {
4952                 audio_info->video_latency = drm_connector->video_latency[0];
4953                 audio_info->audio_latency = drm_connector->audio_latency[0];
4954         }
4955
4956         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4957
4958 }
4959
4960 static void
4961 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4962                                       struct drm_display_mode *dst_mode)
4963 {
4964         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4965         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4966         dst_mode->crtc_clock = src_mode->crtc_clock;
4967         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4968         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4969         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4970         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4971         dst_mode->crtc_htotal = src_mode->crtc_htotal;
4972         dst_mode->crtc_hskew = src_mode->crtc_hskew;
4973         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4974         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4975         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4976         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4977         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4978 }
4979
4980 static void
4981 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4982                                         const struct drm_display_mode *native_mode,
4983                                         bool scale_enabled)
4984 {
4985         if (scale_enabled) {
4986                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4987         } else if (native_mode->clock == drm_mode->clock &&
4988                         native_mode->htotal == drm_mode->htotal &&
4989                         native_mode->vtotal == drm_mode->vtotal) {
4990                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4991         } else {
4992                 /* mode unchanged: no scaling and not an amdgpu-inserted mode, nothing to patch */
4993         }
4994 }
4995
4996 static struct dc_sink *
4997 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4998 {
4999         struct dc_sink_init_data sink_init_data = { 0 };
5000         struct dc_sink *sink = NULL;
5001         sink_init_data.link = aconnector->dc_link;
5002         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5003
5004         sink = dc_sink_create(&sink_init_data);
5005         if (!sink) {
5006                 DRM_ERROR("Failed to create sink!\n");
5007                 return NULL;
5008         }
5009         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5010
5011         return sink;
5012 }
5013
5014 static void set_multisync_trigger_params(
5015                 struct dc_stream_state *stream)
5016 {
5017         if (stream->triggered_crtc_reset.enabled) {
5018                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5019                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5020         }
5021 }
5022
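/*
 * Pick the stream with the highest refresh rate as the multisync
 * master, with refresh (Hz) = pix_clk_100hz * 100 / (h_total *
 * v_total); every stream, the master included, then gets its reset
 * trigger event_source pointed at the master stream.
 */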
5023 static void set_master_stream(struct dc_stream_state *stream_set[],
5024                               int stream_count)
5025 {
5026         int j, highest_rfr = 0, master_stream = 0;
5027
5028         for (j = 0;  j < stream_count; j++) {
5029                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5030                         int refresh_rate = 0;
5031
5032                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5033                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5034                         if (refresh_rate > highest_rfr) {
5035                                 highest_rfr = refresh_rate;
5036                                 master_stream = j;
5037                         }
5038                 }
5039         }
5040         for (j = 0;  j < stream_count; j++) {
5041                 if (stream_set[j])
5042                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5043         }
5044 }
5045
5046 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5047 {
5048         int i = 0;
5049
5050         if (context->stream_count < 2)
5051                 return;
5052         for (i = 0; i < context->stream_count ; i++) {
5053                 if (!context->streams[i])
5054                         continue;
5055                 /*
5056                  * TODO: add a function to read AMD VSDB bits and set
5057                  * crtc_sync_master.multi_sync_enabled flag
5058                  * For now it's set to false
5059                  */
5060                 set_multisync_trigger_params(context->streams[i]);
5061         }
5062         set_master_stream(context->streams, context->stream_count);
5063 }
5064
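/*
 * Build a dc_stream_state for the given connector and mode. A virtual
 * (fake) sink stands in when the connector has no dc_sink, so a stream
 * can still be constructed for a disconnected output. When scaling is
 * active and the refresh rate is unchanged, the vic and sync
 * polarities are inherited from old_stream instead of being derived
 * from the mode.
 */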
5065 static struct dc_stream_state *
5066 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5067                        const struct drm_display_mode *drm_mode,
5068                        const struct dm_connector_state *dm_state,
5069                        const struct dc_stream_state *old_stream,
5070                        int requested_bpc)
5071 {
5072         struct drm_display_mode *preferred_mode = NULL;
5073         struct drm_connector *drm_connector;
5074         const struct drm_connector_state *con_state =
5075                 dm_state ? &dm_state->base : NULL;
5076         struct dc_stream_state *stream = NULL;
5077         struct drm_display_mode mode = *drm_mode;
5078         bool native_mode_found = false;
5079         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5080         int mode_refresh;
5081         int preferred_refresh = 0;
5082 #if defined(CONFIG_DRM_AMD_DC_DCN)
5083         struct dsc_dec_dpcd_caps dsc_caps;
5084         uint32_t link_bandwidth_kbps;
5085 #endif
5086         struct dc_sink *sink = NULL;
5087         if (aconnector == NULL) {
5088                 DRM_ERROR("aconnector is NULL!\n");
5089                 return stream;
5090         }
5091
5092         drm_connector = &aconnector->base;
5093
5094         if (!aconnector->dc_sink) {
5095                 sink = create_fake_sink(aconnector);
5096                 if (!sink)
5097                         return stream;
5098         } else {
5099                 sink = aconnector->dc_sink;
5100                 dc_sink_retain(sink);
5101         }
5102
5103         stream = dc_create_stream_for_sink(sink);
5104
5105         if (stream == NULL) {
5106                 DRM_ERROR("Failed to create stream for sink!\n");
5107                 goto finish;
5108         }
5109
5110         stream->dm_stream_context = aconnector;
5111
5112         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5113                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5114
5115         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5116                 /* Search for preferred mode */
5117                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5118                         native_mode_found = true;
5119                         break;
5120                 }
5121         }
5122         if (!native_mode_found)
5123                 preferred_mode = list_first_entry_or_null(
5124                                 &aconnector->base.modes,
5125                                 struct drm_display_mode,
5126                                 head);
5127
5128         mode_refresh = drm_mode_vrefresh(&mode);
5129
5130         if (preferred_mode == NULL) {
5131                 /*
5132                  * This may not be an error, the use case is when we have no
5133                  * usermode calls to reset and set mode upon hotplug. In this
5134                  * case, we call set mode ourselves to restore the previous mode
5135                  * and the mode list may not yet be filled in.
5136                  */
5137                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5138         } else {
5139                 decide_crtc_timing_for_drm_display_mode(
5140                                 &mode, preferred_mode,
5141                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5142                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5143         }
5144
5145         if (!dm_state)
5146                 drm_mode_set_crtcinfo(&mode, 0);
5147
5148         /*
5149          * If scaling is enabled and the refresh rate didn't change,
5150          * copy the vic and sync polarities from the old timings.
5151          */
5152         if (!scale || mode_refresh != preferred_refresh)
5153                 fill_stream_properties_from_drm_display_mode(stream,
5154                         &mode, &aconnector->base, con_state, NULL, requested_bpc);
5155         else
5156                 fill_stream_properties_from_drm_display_mode(stream,
5157                         &mode, &aconnector->base, con_state, old_stream, requested_bpc);
5158
5159         stream->timing.flags.DSC = 0;
5160
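        /*
         * DSC is only considered for DP sinks: parse the decoder's DPCD caps
         * and let dc_dsc_compute_config() decide whether the mode needs
         * compression to fit the available link bandwidth; the debugfs
         * dsc_settings overrides (force enable, slice counts, bpp) are
         * applied on top of the computed config.
         */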
5161         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5162 #if defined(CONFIG_DRM_AMD_DC_DCN)
5163                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5164                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5165                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5166                                       &dsc_caps);
5167                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5168                                                              dc_link_get_link_cap(aconnector->dc_link));
5169
5170                 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5171                         /* Set DSC policy according to dsc_clock_en */
5172                         dc_dsc_policy_set_enable_dsc_when_not_needed(
5173                                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5174
5175                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5176                                                   &dsc_caps,
5177                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5178                                                   0,
5179                                                   link_bandwidth_kbps,
5180                                                   &stream->timing,
5181                                                   &stream->timing.dsc_cfg))
5182                                 stream->timing.flags.DSC = 1;
5183                         /* Overwrite the stream flag if DSC is enabled through debugfs */
5184                         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5185                                 stream->timing.flags.DSC = 1;
5186
5187                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5188                                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5189
5190                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5191                                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5192
5193                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5194                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5195                 }
5196 #endif
5197         }
5198
5199         update_stream_scaling_settings(&mode, dm_state, stream);
5200
5201         fill_audio_info(
5202                 &stream->audio_info,
5203                 drm_connector,
5204                 sink);
5205
5206         update_stream_signal(stream, sink);
5207
5208         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5209                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5210
5211         if (stream->link->psr_settings.psr_feature_enabled) {
5212                 /*
5213                  * Decide whether the stream supports VSC SDP colorimetry
5214                  * before building the VSC infopacket.
5215                  */
5216                 stream->use_vsc_sdp_for_colorimetry = false;
5217                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5218                         stream->use_vsc_sdp_for_colorimetry =
5219                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5220                 } else {
5221                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5222                                 stream->use_vsc_sdp_for_colorimetry = true;
5223                 }
5224                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5225         }
5226 finish:
5227         dc_sink_release(sink);
5228
5229         return stream;
5230 }
5231
5232 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5233 {
5234         drm_crtc_cleanup(crtc);
5235         kfree(crtc);
5236 }
5237
5238 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5239                                   struct drm_crtc_state *state)
5240 {
5241         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5242
5243         /* TODO: destroy dc_stream objects once the stream object is flattened */
5244         if (cur->stream)
5245                 dc_stream_release(cur->stream);
5246
5248         __drm_atomic_helper_crtc_destroy_state(state);
5249
5251         kfree(state);
5252 }
5253
5254 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5255 {
5256         struct dm_crtc_state *state;
5257
5258         if (crtc->state)
5259                 dm_crtc_destroy_state(crtc, crtc->state);
5260
5261         state = kzalloc(sizeof(*state), GFP_KERNEL);
5262         if (WARN_ON(!state))
5263                 return;
5264
5265         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5266 }
5267
5268 static struct drm_crtc_state *
5269 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5270 {
5271         struct dm_crtc_state *state, *cur;
5272
5273         if (WARN_ON(!crtc->state))
5274                 return NULL;
5275
5276         cur = to_dm_crtc_state(crtc->state);
5277
5278         state = kzalloc(sizeof(*state), GFP_KERNEL);
5279         if (!state)
5280                 return NULL;
5281
5282         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5283
5284         if (cur->stream) {
5285                 state->stream = cur->stream;
5286                 dc_stream_retain(state->stream);
5287         }
5288
5289         state->active_planes = cur->active_planes;
5290         state->vrr_infopacket = cur->vrr_infopacket;
5291         state->abm_level = cur->abm_level;
5292         state->vrr_supported = cur->vrr_supported;
5293         state->freesync_config = cur->freesync_config;
5294         state->crc_src = cur->crc_src;
5295         state->cm_has_degamma = cur->cm_has_degamma;
5296         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5297
5298         /* TODO: duplicate dc_stream once the stream object is flattened */
5299
5300         return &state->base;
5301 }
5302
5303 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5304 {
5305         enum dc_irq_source irq_source;
5306         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5307         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5308         int rc;
5309
5310         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5311
5312         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5313
5314         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5315                          acrtc->crtc_id, enable ? "en" : "dis", rc);
5316         return rc;
5317 }
5318
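/*
 * VBLANK and VUPDATE interrupts are toggled together: VUPDATE is only
 * needed while VRR is active (amdgpu_dm_vrr_active()), so enabling
 * VBLANK also enables VUPDATE in that case, and disabling VBLANK
 * always disables VUPDATE.
 */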
5319 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5320 {
5321         enum dc_irq_source irq_source;
5322         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5323         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5324         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5325         int rc = 0;
5326
5327         if (enable) {
5328                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5329                 if (amdgpu_dm_vrr_active(acrtc_state))
5330                         rc = dm_set_vupdate_irq(crtc, true);
5331         } else {
5332                 /* vblank irq off -> vupdate irq off */
5333                 rc = dm_set_vupdate_irq(crtc, false);
5334         }
5335
5336         if (rc)
5337                 return rc;
5338
5339         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5340         return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5341 }
5342
5343 static int dm_enable_vblank(struct drm_crtc *crtc)
5344 {
5345         return dm_set_vblank(crtc, true);
5346 }
5347
5348 static void dm_disable_vblank(struct drm_crtc *crtc)
5349 {
5350         dm_set_vblank(crtc, false);
5351 }
5352
5353 /* Only the options currently available to the driver are implemented */
5354 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5355         .reset = dm_crtc_reset_state,
5356         .destroy = amdgpu_dm_crtc_destroy,
5357         .gamma_set = drm_atomic_helper_legacy_gamma_set,
5358         .set_config = drm_atomic_helper_set_config,
5359         .page_flip = drm_atomic_helper_page_flip,
5360         .atomic_duplicate_state = dm_crtc_duplicate_state,
5361         .atomic_destroy_state = dm_crtc_destroy_state,
5362         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5363         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5364         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5365         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5366         .enable_vblank = dm_enable_vblank,
5367         .disable_vblank = dm_disable_vblank,
5368         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5369 };
5370
5371 static enum drm_connector_status
5372 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5373 {
5374         bool connected;
5375         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5376
5377         /*
5378          * Notes:
5379          * 1. This interface is NOT called in context of HPD irq.
5380          * 2. This interface *is called* in the context of a user-mode ioctl,
5381          * which makes it a bad place for *any* MST-related activity.
5382          */
5383
5384         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5385             !aconnector->fake_enable)
5386                 connected = (aconnector->dc_sink != NULL);
5387         else
5388                 connected = (aconnector->base.force == DRM_FORCE_ON);
5389
5390         update_subconnector_property(aconnector);
5391
5392         return (connected ? connector_status_connected :
5393                         connector_status_disconnected);
5394 }
5395
5396 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5397                                             struct drm_connector_state *connector_state,
5398                                             struct drm_property *property,
5399                                             uint64_t val)
5400 {
5401         struct drm_device *dev = connector->dev;
5402         struct amdgpu_device *adev = drm_to_adev(dev);
5403         struct dm_connector_state *dm_old_state =
5404                 to_dm_connector_state(connector->state);
5405         struct dm_connector_state *dm_new_state =
5406                 to_dm_connector_state(connector_state);
5407
5408         int ret = -EINVAL;
5409
5410         if (property == dev->mode_config.scaling_mode_property) {
5411                 enum amdgpu_rmx_type rmx_type;
5412
5413                 switch (val) {
5414                 case DRM_MODE_SCALE_CENTER:
5415                         rmx_type = RMX_CENTER;
5416                         break;
5417                 case DRM_MODE_SCALE_ASPECT:
5418                         rmx_type = RMX_ASPECT;
5419                         break;
5420                 case DRM_MODE_SCALE_FULLSCREEN:
5421                         rmx_type = RMX_FULL;
5422                         break;
5423                 case DRM_MODE_SCALE_NONE:
5424                 default:
5425                         rmx_type = RMX_OFF;
5426                         break;
5427                 }
5428
5429                 if (dm_old_state->scaling == rmx_type)
5430                         return 0;
5431
5432                 dm_new_state->scaling = rmx_type;
5433                 ret = 0;
5434         } else if (property == adev->mode_info.underscan_hborder_property) {
5435                 dm_new_state->underscan_hborder = val;
5436                 ret = 0;
5437         } else if (property == adev->mode_info.underscan_vborder_property) {
5438                 dm_new_state->underscan_vborder = val;
5439                 ret = 0;
5440         } else if (property == adev->mode_info.underscan_property) {
5441                 dm_new_state->underscan_enable = val;
5442                 ret = 0;
5443         } else if (property == adev->mode_info.abm_level_property) {
5444                 dm_new_state->abm_level = val;
5445                 ret = 0;
5446         }
5447
5448         return ret;
5449 }
5450
5451 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5452                                             const struct drm_connector_state *state,
5453                                             struct drm_property *property,
5454                                             uint64_t *val)
5455 {
5456         struct drm_device *dev = connector->dev;
5457         struct amdgpu_device *adev = drm_to_adev(dev);
5458         struct dm_connector_state *dm_state =
5459                 to_dm_connector_state(state);
5460         int ret = -EINVAL;
5461
5462         if (property == dev->mode_config.scaling_mode_property) {
5463                 switch (dm_state->scaling) {
5464                 case RMX_CENTER:
5465                         *val = DRM_MODE_SCALE_CENTER;
5466                         break;
5467                 case RMX_ASPECT:
5468                         *val = DRM_MODE_SCALE_ASPECT;
5469                         break;
5470                 case RMX_FULL:
5471                         *val = DRM_MODE_SCALE_FULLSCREEN;
5472                         break;
5473                 case RMX_OFF:
5474                 default:
5475                         *val = DRM_MODE_SCALE_NONE;
5476                         break;
5477                 }
5478                 ret = 0;
5479         } else if (property == adev->mode_info.underscan_hborder_property) {
5480                 *val = dm_state->underscan_hborder;
5481                 ret = 0;
5482         } else if (property == adev->mode_info.underscan_vborder_property) {
5483                 *val = dm_state->underscan_vborder;
5484                 ret = 0;
5485         } else if (property == adev->mode_info.underscan_property) {
5486                 *val = dm_state->underscan_enable;
5487                 ret = 0;
5488         } else if (property == adev->mode_info.abm_level_property) {
5489                 *val = dm_state->abm_level;
5490                 ret = 0;
5491         }
5492
5493         return ret;
5494 }
5495
5496 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5497 {
5498         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5499
5500         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5501 }
5502
5503 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5504 {
5505         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5506         const struct dc_link *link = aconnector->dc_link;
5507         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5508         struct amdgpu_display_manager *dm = &adev->dm;
5509
5510         /*
5511          * Call only if mst_mgr was initialized before, since it's not done
5512          * for all connector types.
5513          */
5514         if (aconnector->mst_mgr.dev)
5515                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5516
5517 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5518         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5519
5520         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5521             link->type != dc_connection_none &&
5522             dm->backlight_dev) {
5523                 backlight_device_unregister(dm->backlight_dev);
5524                 dm->backlight_dev = NULL;
5525         }
5526 #endif
5527
5528         if (aconnector->dc_em_sink)
5529                 dc_sink_release(aconnector->dc_em_sink);
5530         aconnector->dc_em_sink = NULL;
5531         if (aconnector->dc_sink)
5532                 dc_sink_release(aconnector->dc_sink);
5533         aconnector->dc_sink = NULL;
5534
5535         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5536         drm_connector_unregister(connector);
5537         drm_connector_cleanup(connector);
5538         if (aconnector->i2c) {
5539                 i2c_del_adapter(&aconnector->i2c->base);
5540                 kfree(aconnector->i2c);
5541         }
5542         kfree(aconnector->dm_dp_aux.aux.name);
5543
5544         kfree(connector);
5545 }
5546
5547 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5548 {
5549         struct dm_connector_state *state =
5550                 to_dm_connector_state(connector->state);
5551
5552         if (connector->state)
5553                 __drm_atomic_helper_connector_destroy_state(connector->state);
5554
5555         kfree(state);
5556
5557         state = kzalloc(sizeof(*state), GFP_KERNEL);
5558
5559         if (state) {
5560                 state->scaling = RMX_OFF;
5561                 state->underscan_enable = false;
5562                 state->underscan_hborder = 0;
5563                 state->underscan_vborder = 0;
5564                 state->base.max_requested_bpc = 8;
5565                 state->vcpi_slots = 0;
5566                 state->pbn = 0;
5567                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5568                         state->abm_level = amdgpu_dm_abm_level;
5569
5570                 __drm_atomic_helper_connector_reset(connector, &state->base);
5571         }
5572 }
5573
5574 struct drm_connector_state *
5575 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5576 {
5577         struct dm_connector_state *state =
5578                 to_dm_connector_state(connector->state);
5579
5580         struct dm_connector_state *new_state =
5581                         kmemdup(state, sizeof(*state), GFP_KERNEL);
5582
5583         if (!new_state)
5584                 return NULL;
5585
5586         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5587
5588         new_state->freesync_capable = state->freesync_capable;
5589         new_state->abm_level = state->abm_level;
5590         new_state->scaling = state->scaling;
5591         new_state->underscan_enable = state->underscan_enable;
5592         new_state->underscan_hborder = state->underscan_hborder;
5593         new_state->underscan_vborder = state->underscan_vborder;
5594         new_state->vcpi_slots = state->vcpi_slots;
5595         new_state->pbn = state->pbn;
5596         return &new_state->base;
5597 }
5598
5599 static int
5600 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5601 {
5602         struct amdgpu_dm_connector *amdgpu_dm_connector =
5603                 to_amdgpu_dm_connector(connector);
5604         int r;
5605
5606         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5607             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5608                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5609                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5610                 if (r)
5611                         return r;
5612         }
5613
5614 #if defined(CONFIG_DEBUG_FS)
5615         connector_debugfs_init(amdgpu_dm_connector);
5616 #endif
5617
5618         return 0;
5619 }
5620
5621 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5622         .reset = amdgpu_dm_connector_funcs_reset,
5623         .detect = amdgpu_dm_connector_detect,
5624         .fill_modes = drm_helper_probe_single_connector_modes,
5625         .destroy = amdgpu_dm_connector_destroy,
5626         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5627         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5628         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5629         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5630         .late_register = amdgpu_dm_connector_late_register,
5631         .early_unregister = amdgpu_dm_connector_unregister
5632 };
5633
5634 static int get_modes(struct drm_connector *connector)
5635 {
5636         return amdgpu_dm_connector_get_modes(connector);
5637 }
5638
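/*
 * Create an emulated (virtual) sink from the EDID blob forced on the
 * connector. If no EDID override is present, force the connector off, so a
 * forced-on connector without an EDID cannot be lit up.
 */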
5639 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5640 {
5641         struct dc_sink_init_data init_params = {
5642                         .link = aconnector->dc_link,
5643                         .sink_signal = SIGNAL_TYPE_VIRTUAL
5644         };
5645         struct edid *edid;
5646
5647         if (!aconnector->base.edid_blob_ptr) {
5648                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5649                                 aconnector->base.name);
5650
5651                 aconnector->base.force = DRM_FORCE_OFF;
5652                 aconnector->base.override_edid = false;
5653                 return;
5654         }
5655
5656         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5657
5658         aconnector->edid = edid;
5659
5660         aconnector->dc_em_sink = dc_link_add_remote_sink(
5661                 aconnector->dc_link,
5662                 (uint8_t *)edid,
5663                 (edid->extensions + 1) * EDID_LENGTH,
5664                 &init_params);
5665
5666         if (aconnector->base.force == DRM_FORCE_ON) {
5667                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5668                 aconnector->dc_link->local_sink :
5669                 aconnector->dc_em_sink;
5670                 dc_sink_retain(aconnector->dc_sink);
5671         }
5672 }
5673
5674 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5675 {
5676         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5677
5678         /*
5679          * In case of a headless boot with force-on for a DP managed connector,
5680          * these settings have to be != 0 to get the initial modeset.
5681          */
5682         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5683                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5684                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5685         }
5686
5688         aconnector->base.override_edid = true;
5689         create_eml_sink(aconnector);
5690 }
5691
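/*
 * Create a stream for the requested mode and validate it with DC. If
 * validation fails, retry with a progressively lower bpc (in steps of 2,
 * down to a floor of 6) before giving up, since the sink or link may not
 * support the initially requested color depth.
 */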
5692 static struct dc_stream_state *
5693 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5694                                 const struct drm_display_mode *drm_mode,
5695                                 const struct dm_connector_state *dm_state,
5696                                 const struct dc_stream_state *old_stream)
5697 {
5698         struct drm_connector *connector = &aconnector->base;
5699         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5700         struct dc_stream_state *stream;
5701         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5702         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5703         enum dc_status dc_result = DC_OK;
5704
5705         do {
5706                 stream = create_stream_for_sink(aconnector, drm_mode,
5707                                                 dm_state, old_stream,
5708                                                 requested_bpc);
5709                 if (stream == NULL) {
5710                         DRM_ERROR("Failed to create stream for sink!\n");
5711                         break;
5712                 }
5713
5714                 dc_result = dc_validate_stream(adev->dm.dc, stream);
5715
5716                 if (dc_result != DC_OK) {
5717                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5718                                       drm_mode->hdisplay,
5719                                       drm_mode->vdisplay,
5720                                       drm_mode->clock,
5721                                       dc_result,
5722                                       dc_status_to_str(dc_result));
5723
5724                         dc_stream_release(stream);
5725                         stream = NULL;
5726                         requested_bpc -= 2; /* lower bpc to retry validation */
5727                 }
5728
5729         } while (stream == NULL && requested_bpc >= 6);
5730
5731         return stream;
5732 }
5733
5734 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5735                                    struct drm_display_mode *mode)
5736 {
5737         int result = MODE_ERROR;
5738         struct dc_sink *dc_sink;
5739         /* TODO: Unhardcode stream count */
5740         struct dc_stream_state *stream;
5741         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5742
5743         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5744                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5745                 return result;
5746
5747         /*
5748          * Only run this the first time mode_valid is called, to initialize
5749          * EDID management.
5750          */
5751         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5752                 !aconnector->dc_em_sink)
5753                 handle_edid_mgmt(aconnector);
5754
5755         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5756
5757         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5758                                 aconnector->base.force != DRM_FORCE_ON) {
5759                 DRM_ERROR("dc_sink is NULL!\n");
5760                 goto fail;
5761         }
5762
5763         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5764         if (stream) {
5765                 dc_stream_release(stream);
5766                 result = MODE_OK;
5767         }
5768
5769 fail:
5770         /* TODO: error handling */
5771         return result;
5772 }
5773
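/*
 * Pack the HDR static metadata from the connector state into a DC info
 * packet: a Dynamic Range and Mastering (DRM) infoframe for HDMI, or an SDP
 * for DP/eDP. The 30-byte buffer holds the fixed 26 bytes of static metadata
 * plus the 4-byte infoframe header.
 */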
5774 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5775                                 struct dc_info_packet *out)
5776 {
5777         struct hdmi_drm_infoframe frame;
5778         unsigned char buf[30]; /* 26 + 4 */
5779         ssize_t len;
5780         int ret, i;
5781
5782         memset(out, 0, sizeof(*out));
5783
5784         if (!state->hdr_output_metadata)
5785                 return 0;
5786
5787         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5788         if (ret)
5789                 return ret;
5790
5791         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5792         if (len < 0)
5793                 return (int)len;
5794
5795         /* Static metadata is a fixed 26 bytes + 4 byte header. */
5796         if (len != 30)
5797                 return -EINVAL;
5798
5799         /* Prepare the infopacket for DC. */
5800         switch (state->connector->connector_type) {
5801         case DRM_MODE_CONNECTOR_HDMIA:
5802                 out->hb0 = 0x87; /* type */
5803                 out->hb1 = 0x01; /* version */
5804                 out->hb2 = 0x1A; /* length */
5805                 out->sb[0] = buf[3]; /* checksum */
5806                 i = 1;
5807                 break;
5808
5809         case DRM_MODE_CONNECTOR_DisplayPort:
5810         case DRM_MODE_CONNECTOR_eDP:
5811                 out->hb0 = 0x00; /* sdp id, zero */
5812                 out->hb1 = 0x87; /* type */
5813                 out->hb2 = 0x1D; /* payload len - 1 */
5814                 out->hb3 = (0x13 << 2); /* sdp version */
5815                 out->sb[0] = 0x01; /* version */
5816                 out->sb[1] = 0x1A; /* length */
5817                 i = 2;
5818                 break;
5819
5820         default:
5821                 return -EINVAL;
5822         }
5823
5824         memcpy(&out->sb[i], &buf[4], 26);
5825         out->valid = true;
5826
5827         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5828                        sizeof(out->sb), false);
5829
5830         return 0;
5831 }
5832
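/*
 * Returns true if the HDR static metadata differs between the two connector
 * states. Distinct blobs of equal length are compared byte for byte; a blob
 * appearing, disappearing, or changing length always counts as different.
 */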
5833 static bool
5834 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5835                           const struct drm_connector_state *new_state)
5836 {
5837         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5838         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5839
5840         if (old_blob != new_blob) {
5841                 if (old_blob && new_blob &&
5842                     old_blob->length == new_blob->length)
5843                         return memcmp(old_blob->data, new_blob->data,
5844                                       old_blob->length);
5845
5846                 return true;
5847         }
5848
5849         return false;
5850 }
5851
5852 static int
5853 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5854                                  struct drm_atomic_state *state)
5855 {
5856         struct drm_connector_state *new_con_state =
5857                 drm_atomic_get_new_connector_state(state, conn);
5858         struct drm_connector_state *old_con_state =
5859                 drm_atomic_get_old_connector_state(state, conn);
5860         struct drm_crtc *crtc = new_con_state->crtc;
5861         struct drm_crtc_state *new_crtc_state;
5862         int ret;
5863
5864         trace_amdgpu_dm_connector_atomic_check(new_con_state);
5865
5866         if (!crtc)
5867                 return 0;
5868
5869         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5870                 struct dc_info_packet hdr_infopacket;
5871
5872                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5873                 if (ret)
5874                         return ret;
5875
5876                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5877                 if (IS_ERR(new_crtc_state))
5878                         return PTR_ERR(new_crtc_state);
5879
5880                 /*
5881                  * DC considers the stream backends changed if the
5882                  * static metadata changes. Forcing the modeset also
5883                  * gives a simple way for userspace to switch from
5884                  * 8bpc to 10bpc when setting the metadata to enter
5885                  * or exit HDR.
5886                  *
5887                  * Changing the static metadata after it's been
5888                  * set is permissible, however. So only force a
5889                  * modeset if we're entering or exiting HDR.
5890                  */
5891                 new_crtc_state->mode_changed =
5892                         !old_con_state->hdr_output_metadata ||
5893                         !new_con_state->hdr_output_metadata;
5894         }
5895
5896         return 0;
5897 }
5898
5899 static const struct drm_connector_helper_funcs
5900 amdgpu_dm_connector_helper_funcs = {
5901         /*
5902          * If a second, bigger display is hotplugged in FB console mode, the
5903          * bigger resolution modes will be filtered out by drm_mode_validate_size()
5904          * and will be missing after the user starts lightdm. So we need to rebuild
5905          * the mode list in the get_modes callback, not just return the mode count.
5906          */
5907         .get_modes = get_modes,
5908         .mode_valid = amdgpu_dm_connector_mode_valid,
5909         .atomic_check = amdgpu_dm_connector_atomic_check,
5910 };
5911
5912 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5913 {
5914 }
5915
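/*
 * Count the enabled, non-cursor planes on the CRTC. A plane with no new
 * state in this atomic update is assumed to have previously passed
 * validation and still be enabled; a plane with new state only counts if it
 * has a framebuffer attached.
 */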
5916 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5917 {
5918         struct drm_atomic_state *state = new_crtc_state->state;
5919         struct drm_plane *plane;
5920         int num_active = 0;
5921
5922         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5923                 struct drm_plane_state *new_plane_state;
5924
5925                 /* Cursor planes are "fake". */
5926                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5927                         continue;
5928
5929                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5930
5931                 if (!new_plane_state) {
5932                         /*
5933                          * The plane is enabled on the CRTC and hasn't changed
5934                          * state. This means that it previously passed
5935                          * validation and is therefore enabled.
5936                          */
5937                         num_active += 1;
5938                         continue;
5939                 }
5940
5941                 /* We need a framebuffer to be considered enabled. */
5942                 num_active += (new_plane_state->fb != NULL);
5943         }
5944
5945         return num_active;
5946 }
5947
5948 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5949                                          struct drm_crtc_state *new_crtc_state)
5950 {
5951         struct dm_crtc_state *dm_new_crtc_state =
5952                 to_dm_crtc_state(new_crtc_state);
5953
5954         dm_new_crtc_state->active_planes = 0;
5955
5956         if (!dm_new_crtc_state->stream)
5957                 return;
5958
5959         dm_new_crtc_state->active_planes =
5960                 count_crtc_active_planes(new_crtc_state);
5961 }
5962
5963 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5964                                        struct drm_atomic_state *state)
5965 {
5966         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
5967                                                                           crtc);
5968         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5969         struct dc *dc = adev->dm.dc;
5970         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5971         int ret = -EINVAL;
5972
5973         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
5974
5975         dm_update_crtc_active_planes(crtc, crtc_state);
5976
5977         if (unlikely(!dm_crtc_state->stream &&
5978                      modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
5979                 WARN_ON(1);
5980                 return ret;
5981         }
5982
5983         /*
5984          * We require the primary plane to be enabled whenever the CRTC is, otherwise
5985          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5986          * planes are disabled, which is not supported by the hardware. And there is legacy
5987          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5988          */
5989         if (crtc_state->enable &&
5990             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
5991                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
5992                 return -EINVAL;
5993         }
5994
5995         /* In some use cases, like reset, no stream is attached */
5996         if (!dm_crtc_state->stream)
5997                 return 0;
5998
5999         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6000                 return 0;
6001
6002         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6003         return ret;
6004 }
6005
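/* No mode fixup needed; the stream is validated by DC in atomic_check. */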
6006 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6007                                       const struct drm_display_mode *mode,
6008                                       struct drm_display_mode *adjusted_mode)
6009 {
6010         return true;
6011 }
6012
6013 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6014         .disable = dm_crtc_helper_disable,
6015         .atomic_check = dm_crtc_helper_atomic_check,
6016         .mode_fixup = dm_crtc_helper_mode_fixup,
6017         .get_scanout_position = amdgpu_crtc_get_scanout_position,
6018 };
6019
6020 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6021 {
6022
6023 }
6024
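/* Translate a DC color depth into bits per component; 0 if unknown. */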
6025 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6026 {
6027         switch (display_color_depth) {
6028         case COLOR_DEPTH_666:
6029                 return 6;
6030         case COLOR_DEPTH_888:
6031                 return 8;
6032         case COLOR_DEPTH_101010:
6033                 return 10;
6034         case COLOR_DEPTH_121212:
6035                 return 12;
6036         case COLOR_DEPTH_141414:
6037                 return 14;
6038         case COLOR_DEPTH_161616:
6039                 return 16;
6040         default:
6041                 break;
6042         }
6043         return 0;
6044 }
6045
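/*
 * For MST connectors (those with a port), compute the PBN for the mode
 * under check from the adjusted mode's clock and the derived bpp, then
 * atomically reserve the matching number of VCPI slots on the topology
 * manager. Connectors without a port or a DC sink are ignored.
 */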
6046 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6047                                           struct drm_crtc_state *crtc_state,
6048                                           struct drm_connector_state *conn_state)
6049 {
6050         struct drm_atomic_state *state = crtc_state->state;
6051         struct drm_connector *connector = conn_state->connector;
6052         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6053         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6054         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6055         struct drm_dp_mst_topology_mgr *mst_mgr;
6056         struct drm_dp_mst_port *mst_port;
6057         enum dc_color_depth color_depth;
6058         int clock, bpp = 0;
6059         bool is_y420 = false;
6060
6061         if (!aconnector->port || !aconnector->dc_sink)
6062                 return 0;
6063
6064         mst_port = aconnector->port;
6065         mst_mgr = &aconnector->mst_port->mst_mgr;
6066
6067         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6068                 return 0;
6069
6070         if (!state->duplicated) {
6071                 int max_bpc = conn_state->max_requested_bpc;
6072                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6073                                 aconnector->force_yuv420_output;
6074                 color_depth = convert_color_depth_from_display_info(connector,
6075                                                                     is_y420,
6076                                                                     max_bpc);
6077                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6078                 clock = adjusted_mode->clock;
6079                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6080         }
6081         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6082                                                                            mst_mgr,
6083                                                                            mst_port,
6084                                                                            dm_new_connector_state->pbn,
6085                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
6086         if (dm_new_connector_state->vcpi_slots < 0) {
6087                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6088                 return dm_new_connector_state->vcpi_slots;
6089         }
6090         return 0;
6091 }
6092
6093 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6094         .disable = dm_encoder_helper_disable,
6095         .atomic_check = dm_encoder_helper_atomic_check
6096 };
6097
6098 #if defined(CONFIG_DRM_AMD_DC_DCN)
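/*
 * Walk the connectors in the atomic state and enable or disable DSC on each
 * MST stream's port. When DSC is enabled, recompute the PBN and VCPI slot
 * count from the DSC target bpp and pixel clock, since compression changes
 * the bandwidth the stream occupies on the link.
 */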
6099 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6100                                             struct dc_state *dc_state)
6101 {
6102         struct dc_stream_state *stream = NULL;
6103         struct drm_connector *connector;
6104         struct drm_connector_state *new_con_state, *old_con_state;
6105         struct amdgpu_dm_connector *aconnector;
6106         struct dm_connector_state *dm_conn_state;
6107         int i, j, clock, bpp;
6108         int vcpi, pbn_div, pbn = 0;
6109
6110         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6111
6112                 aconnector = to_amdgpu_dm_connector(connector);
6113
6114                 if (!aconnector->port)
6115                         continue;
6116
6117                 if (!new_con_state || !new_con_state->crtc)
6118                         continue;
6119
6120                 dm_conn_state = to_dm_connector_state(new_con_state);
6121
6122                 for (j = 0; j < dc_state->stream_count; j++) {
6123                         stream = dc_state->streams[j];
6124                         if (!stream)
6125                                 continue;
6126
6127                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6128                                 break;
6129
6130                         stream = NULL;
6131                 }
6132
6133                 if (!stream)
6134                         continue;
6135
6136                 if (stream->timing.flags.DSC != 1) {
6137                         drm_dp_mst_atomic_enable_dsc(state,
6138                                                      aconnector->port,
6139                                                      dm_conn_state->pbn,
6140                                                      0,
6141                                                      false);
6142                         continue;
6143                 }
6144
6145                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6146                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6147                 clock = stream->timing.pix_clk_100hz / 10;
6148                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6149                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6150                                                     aconnector->port,
6151                                                     pbn, pbn_div,
6152                                                     true);
6153                 if (vcpi < 0)
6154                         return vcpi;
6155
6156                 dm_conn_state->pbn = pbn;
6157                 dm_conn_state->vcpi_slots = vcpi;
6158         }
6159         return 0;
6160 }
6161 #endif
6162
6163 static void dm_drm_plane_reset(struct drm_plane *plane)
6164 {
6165         struct dm_plane_state *amdgpu_state = NULL;
6166
6167         if (plane->state)
6168                 plane->funcs->atomic_destroy_state(plane, plane->state);
6169
6170         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6171         WARN_ON(amdgpu_state == NULL);
6172
6173         if (amdgpu_state)
6174                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6175 }
6176
6177 static struct drm_plane_state *
6178 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6179 {
6180         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6181
6182         old_dm_plane_state = to_dm_plane_state(plane->state);
6183         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6184         if (!dm_plane_state)
6185                 return NULL;
6186
6187         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6188
6189         if (old_dm_plane_state->dc_state) {
6190                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6191                 dc_plane_state_retain(dm_plane_state->dc_state);
6192         }
6193
6194         return &dm_plane_state->base;
6195 }
6196
6197 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6198                                 struct drm_plane_state *state)
6199 {
6200         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6201
6202         if (dm_plane_state->dc_state)
6203                 dc_plane_state_release(dm_plane_state->dc_state);
6204
6205         drm_atomic_helper_plane_destroy_state(plane, state);
6206 }
6207
6208 static const struct drm_plane_funcs dm_plane_funcs = {
6209         .update_plane   = drm_atomic_helper_update_plane,
6210         .disable_plane  = drm_atomic_helper_disable_plane,
6211         .destroy        = drm_primary_helper_destroy,
6212         .reset = dm_drm_plane_reset,
6213         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6214         .atomic_destroy_state = dm_drm_plane_destroy_state,
6215         .format_mod_supported = dm_plane_format_mod_supported,
6216 };
6217
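/*
 * Prepare a framebuffer for scanout: reserve the backing BO, pin it to a
 * supported domain (always VRAM for cursors), ensure it has a GART mapping,
 * and record its GPU address. For newly created planes, also fill in the DC
 * buffer attributes (tiling, DCC, addresses) from the pinned framebuffer.
 */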
6218 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6219                                       struct drm_plane_state *new_state)
6220 {
6221         struct amdgpu_framebuffer *afb;
6222         struct drm_gem_object *obj;
6223         struct amdgpu_device *adev;
6224         struct amdgpu_bo *rbo;
6225         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6226         struct list_head list;
6227         struct ttm_validate_buffer tv;
6228         struct ww_acquire_ctx ticket;
6229         uint32_t domain;
6230         int r;
6231
6232         if (!new_state->fb) {
6233                 DRM_DEBUG_DRIVER("No FB bound\n");
6234                 return 0;
6235         }
6236
6237         afb = to_amdgpu_framebuffer(new_state->fb);
6238         obj = new_state->fb->obj[0];
6239         rbo = gem_to_amdgpu_bo(obj);
6240         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6241         INIT_LIST_HEAD(&list);
6242
6243         tv.bo = &rbo->tbo;
6244         tv.num_shared = 1;
6245         list_add(&tv.head, &list);
6246
6247         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6248         if (r) {
6249                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6250                 return r;
6251         }
6252
6253         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6254                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6255         else
6256                 domain = AMDGPU_GEM_DOMAIN_VRAM;
6257
6258         r = amdgpu_bo_pin(rbo, domain);
6259         if (unlikely(r != 0)) {
6260                 if (r != -ERESTARTSYS)
6261                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6262                 ttm_eu_backoff_reservation(&ticket, &list);
6263                 return r;
6264         }
6265
6266         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6267         if (unlikely(r != 0)) {
6268                 amdgpu_bo_unpin(rbo);
6269                 ttm_eu_backoff_reservation(&ticket, &list);
6270                 DRM_ERROR("%p bind failed\n", rbo);
6271                 return r;
6272         }
6273
6274         ttm_eu_backoff_reservation(&ticket, &list);
6275
6276         afb->address = amdgpu_bo_gpu_offset(rbo);
6277
6278         amdgpu_bo_ref(rbo);
6279
6280         /*
6281          * We don't do surface updates on planes that have been newly created,
6282          * but we also don't have the afb->address during atomic check.
6283          *
6284          * Fill in buffer attributes depending on the address here, but only on
6285          * newly created planes since they're not being used by DC yet and this
6286          * won't modify global state.
6287          */
6288         dm_plane_state_old = to_dm_plane_state(plane->state);
6289         dm_plane_state_new = to_dm_plane_state(new_state);
6290
6291         if (dm_plane_state_new->dc_state &&
6292             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6293                 struct dc_plane_state *plane_state =
6294                         dm_plane_state_new->dc_state;
6295                 bool force_disable_dcc = !plane_state->dcc.enable;
6296
6297                 fill_plane_buffer_attributes(
6298                         adev, afb, plane_state->format, plane_state->rotation,
6299                         afb->tiling_flags,
6300                         &plane_state->tiling_info, &plane_state->plane_size,
6301                         &plane_state->dcc, &plane_state->address,
6302                         afb->tmz_surface, force_disable_dcc);
6303         }
6304
6305         return 0;
6306 }
6307
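/* Undo prepare_fb: unpin the old framebuffer's BO and drop our reference. */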
6308 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6309                                        struct drm_plane_state *old_state)
6310 {
6311         struct amdgpu_bo *rbo;
6312         int r;
6313
6314         if (!old_state->fb)
6315                 return;
6316
6317         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6318         r = amdgpu_bo_reserve(rbo, false);
6319         if (unlikely(r)) {
6320                 DRM_ERROR("failed to reserve rbo before unpin\n");
6321                 return;
6322         }
6323
6324         amdgpu_bo_unpin(rbo);
6325         amdgpu_bo_unreserve(rbo);
6326         amdgpu_bo_unref(&rbo);
6327 }
6328
6329 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6330                                        struct drm_crtc_state *new_crtc_state)
6331 {
6332         int max_downscale = 0;
6333         int max_upscale = INT_MAX;
6334
6335         /* TODO: These should be checked against DC plane caps */
6336         return drm_atomic_helper_check_plane_state(
6337                 state, new_crtc_state, max_downscale, max_upscale, true, true);
6338 }
6339
6340 static int dm_plane_atomic_check(struct drm_plane *plane,
6341                                  struct drm_plane_state *state)
6342 {
6343         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6344         struct dc *dc = adev->dm.dc;
6345         struct dm_plane_state *dm_plane_state;
6346         struct dc_scaling_info scaling_info;
6347         struct drm_crtc_state *new_crtc_state;
6348         int ret;
6349
6350         trace_amdgpu_dm_plane_atomic_check(state);
6351
6352         dm_plane_state = to_dm_plane_state(state);
6353
6354         if (!dm_plane_state->dc_state)
6355                 return 0;
6356
6357         new_crtc_state =
6358                 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6359         if (!new_crtc_state)
6360                 return -EINVAL;
6361
6362         ret = dm_plane_helper_check_state(state, new_crtc_state);
6363         if (ret)
6364                 return ret;
6365
6366         ret = fill_dc_scaling_info(state, &scaling_info);
6367         if (ret)
6368                 return ret;
6369
6370         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6371                 return 0;
6372
6373         return -EINVAL;
6374 }
6375
6376 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6377                                        struct drm_plane_state *new_plane_state)
6378 {
6379         /* Only support async updates on cursor planes. */
6380         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6381                 return -EINVAL;
6382
6383         return 0;
6384 }
6385
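/*
 * Apply a cursor update outside of a full atomic commit: swap in the new
 * framebuffer, copy the new src/crtc coordinates into the current plane
 * state, and program the cursor position and attributes immediately.
 */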
6386 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6387                                          struct drm_plane_state *new_state)
6388 {
6389         struct drm_plane_state *old_state =
6390                 drm_atomic_get_old_plane_state(new_state->state, plane);
6391
6392         trace_amdgpu_dm_atomic_update_cursor(new_state);
6393
6394         swap(plane->state->fb, new_state->fb);
6395
6396         plane->state->src_x = new_state->src_x;
6397         plane->state->src_y = new_state->src_y;
6398         plane->state->src_w = new_state->src_w;
6399         plane->state->src_h = new_state->src_h;
6400         plane->state->crtc_x = new_state->crtc_x;
6401         plane->state->crtc_y = new_state->crtc_y;
6402         plane->state->crtc_w = new_state->crtc_w;
6403         plane->state->crtc_h = new_state->crtc_h;
6404
6405         handle_cursor_update(plane, old_state);
6406 }
6407
6408 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6409         .prepare_fb = dm_plane_helper_prepare_fb,
6410         .cleanup_fb = dm_plane_helper_cleanup_fb,
6411         .atomic_check = dm_plane_atomic_check,
6412         .atomic_async_check = dm_plane_atomic_async_check,
6413         .atomic_async_update = dm_plane_atomic_async_update
6414 };
6415
6416 /*
6417  * TODO: these are currently initialized to RGB formats only.
6418  * For future use cases we should either initialize them dynamically based on
6419  * plane capabilities, or initialize this array to all formats, so the
6420  * internal drm check will succeed, and let DC implement the proper checks.
6421  */
6422 static const uint32_t rgb_formats[] = {
6423         DRM_FORMAT_XRGB8888,
6424         DRM_FORMAT_ARGB8888,
6425         DRM_FORMAT_RGBA8888,
6426         DRM_FORMAT_XRGB2101010,
6427         DRM_FORMAT_XBGR2101010,
6428         DRM_FORMAT_ARGB2101010,
6429         DRM_FORMAT_ABGR2101010,
6430         DRM_FORMAT_XBGR8888,
6431         DRM_FORMAT_ABGR8888,
6432         DRM_FORMAT_RGB565,
6433 };
6434
6435 static const uint32_t overlay_formats[] = {
6436         DRM_FORMAT_XRGB8888,
6437         DRM_FORMAT_ARGB8888,
6438         DRM_FORMAT_RGBA8888,
6439         DRM_FORMAT_XBGR8888,
6440         DRM_FORMAT_ABGR8888,
6441         DRM_FORMAT_RGB565
6442 };
6443
6444 static const u32 cursor_formats[] = {
6445         DRM_FORMAT_ARGB8888
6446 };
6447
6448 static int get_plane_formats(const struct drm_plane *plane,
6449                              const struct dc_plane_cap *plane_cap,
6450                              uint32_t *formats, int max_formats)
6451 {
6452         int i, num_formats = 0;
6453
6454         /*
6455          * TODO: Query support for each group of formats directly from
6456          * DC plane caps. This will require adding more formats to the
6457          * caps list.
6458          */
6459
6460         switch (plane->type) {
6461         case DRM_PLANE_TYPE_PRIMARY:
6462                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6463                         if (num_formats >= max_formats)
6464                                 break;
6465
6466                         formats[num_formats++] = rgb_formats[i];
6467                 }
6468
6469                 if (plane_cap && plane_cap->pixel_format_support.nv12)
6470                         formats[num_formats++] = DRM_FORMAT_NV12;
6471                 if (plane_cap && plane_cap->pixel_format_support.p010)
6472                         formats[num_formats++] = DRM_FORMAT_P010;
6473                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6474                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6475                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6476                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6477                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6478                 }
6479                 break;
6480
6481         case DRM_PLANE_TYPE_OVERLAY:
6482                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6483                         if (num_formats >= max_formats)
6484                                 break;
6485
6486                         formats[num_formats++] = overlay_formats[i];
6487                 }
6488                 break;
6489
6490         case DRM_PLANE_TYPE_CURSOR:
6491                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6492                         if (num_formats >= max_formats)
6493                                 break;
6494
6495                         formats[num_formats++] = cursor_formats[i];
6496                 }
6497                 break;
6498         }
6499
6500         return num_formats;
6501 }
6502
6503 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6504                                 struct drm_plane *plane,
6505                                 unsigned long possible_crtcs,
6506                                 const struct dc_plane_cap *plane_cap)
6507 {
6508         uint32_t formats[32];
6509         int num_formats;
6510         int res = -EPERM;
6511         unsigned int supported_rotations;
6512         uint64_t *modifiers = NULL;
6513
6514         num_formats = get_plane_formats(plane, plane_cap, formats,
6515                                         ARRAY_SIZE(formats));
6516
6517         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6518         if (res)
6519                 return res;
6520
6521         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6522                                        &dm_plane_funcs, formats, num_formats,
6523                                        modifiers, plane->type, NULL);
6524         kfree(modifiers);
6525         if (res)
6526                 return res;
6527
6528         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6529             plane_cap && plane_cap->per_pixel_alpha) {
6530                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6531                                           BIT(DRM_MODE_BLEND_PREMULTI);
6532
6533                 drm_plane_create_alpha_property(plane);
6534                 drm_plane_create_blend_mode_property(plane, blend_caps);
6535         }
6536
6537         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6538             plane_cap &&
6539             (plane_cap->pixel_format_support.nv12 ||
6540              plane_cap->pixel_format_support.p010)) {
6541                 /* This only affects YUV formats. */
6542                 drm_plane_create_color_properties(
6543                         plane,
6544                         BIT(DRM_COLOR_YCBCR_BT601) |
6545                         BIT(DRM_COLOR_YCBCR_BT709) |
6546                         BIT(DRM_COLOR_YCBCR_BT2020),
6547                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6548                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6549                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6550         }
6551
6552         supported_rotations =
6553                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6554                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6555
6556         if (dm->adev->asic_type >= CHIP_BONAIRE &&
6557             plane->type != DRM_PLANE_TYPE_CURSOR)
6558                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6559                                                    supported_rotations);
6560
6561         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6562
6563         /* Create (reset) the plane state */
6564         if (plane->funcs->reset)
6565                 plane->funcs->reset(plane);
6566
6567         return 0;
6568 }
6569
6570 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6571                                struct drm_plane *plane,
6572                                uint32_t crtc_index)
6573 {
6574         struct amdgpu_crtc *acrtc = NULL;
6575         struct drm_plane *cursor_plane;
6576
6577         int res = -ENOMEM;
6578
6579         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6580         if (!cursor_plane)
6581                 goto fail;
6582
6583         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6584         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6585
6586         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6587         if (!acrtc)
6588                 goto fail;
6589
6590         res = drm_crtc_init_with_planes(
6591                         dm->ddev,
6592                         &acrtc->base,
6593                         plane,
6594                         cursor_plane,
6595                         &amdgpu_dm_crtc_funcs, NULL);
6596
6597         if (res)
6598                 goto fail;
6599
6600         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6601
6602         /* Create (reset) the CRTC state */
6603         if (acrtc->base.funcs->reset)
6604                 acrtc->base.funcs->reset(&acrtc->base);
6605
6606         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6607         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6608
6609         acrtc->crtc_id = crtc_index;
6610         acrtc->base.enabled = false;
6611         acrtc->otg_inst = -1;
6612
6613         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6614         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6615                                    true, MAX_COLOR_LUT_ENTRIES);
6616         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6617
6618         return 0;
6619
6620 fail:
6621         kfree(acrtc);
6622         kfree(cursor_plane);
6623         return res;
6624 }
6625
6626
6627 static int to_drm_connector_type(enum signal_type st)
6628 {
6629         switch (st) {
6630         case SIGNAL_TYPE_HDMI_TYPE_A:
6631                 return DRM_MODE_CONNECTOR_HDMIA;
6632         case SIGNAL_TYPE_EDP:
6633                 return DRM_MODE_CONNECTOR_eDP;
6634         case SIGNAL_TYPE_LVDS:
6635                 return DRM_MODE_CONNECTOR_LVDS;
6636         case SIGNAL_TYPE_RGB:
6637                 return DRM_MODE_CONNECTOR_VGA;
6638         case SIGNAL_TYPE_DISPLAY_PORT:
6639         case SIGNAL_TYPE_DISPLAY_PORT_MST:
6640                 return DRM_MODE_CONNECTOR_DisplayPort;
6641         case SIGNAL_TYPE_DVI_DUAL_LINK:
6642         case SIGNAL_TYPE_DVI_SINGLE_LINK:
6643                 return DRM_MODE_CONNECTOR_DVID;
6644         case SIGNAL_TYPE_VIRTUAL:
6645                 return DRM_MODE_CONNECTOR_VIRTUAL;
6646
6647         default:
6648                 return DRM_MODE_CONNECTOR_Unknown;
6649         }
6650 }
6651
6652 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6653 {
6654         struct drm_encoder *encoder;
6655
6656         /* There is only one encoder per connector */
6657         drm_connector_for_each_possible_encoder(connector, encoder)
6658                 return encoder;
6659
6660         return NULL;
6661 }
6662
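/*
 * Cache the connector's native mode on its encoder. Note that only the
 * first entry of the (sorted) probed mode list is examined; it becomes the
 * native mode only if it carries the DRM_MODE_TYPE_PREFERRED flag.
 */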
6663 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6664 {
6665         struct drm_encoder *encoder;
6666         struct amdgpu_encoder *amdgpu_encoder;
6667
6668         encoder = amdgpu_dm_connector_to_encoder(connector);
6669
6670         if (encoder == NULL)
6671                 return;
6672
6673         amdgpu_encoder = to_amdgpu_encoder(encoder);
6674
6675         amdgpu_encoder->native_mode.clock = 0;
6676
6677         if (!list_empty(&connector->probed_modes)) {
6678                 struct drm_display_mode *preferred_mode = NULL;
6679
6680                 list_for_each_entry(preferred_mode,
6681                                     &connector->probed_modes,
6682                                     head) {
6683                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6684                                 amdgpu_encoder->native_mode = *preferred_mode;
6685
6686                         break;
6687                 }
6688
6689         }
6690 }
6691
6692 static struct drm_display_mode *
6693 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6694                              char *name,
6695                              int hdisplay, int vdisplay)
6696 {
6697         struct drm_device *dev = encoder->dev;
6698         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6699         struct drm_display_mode *mode = NULL;
6700         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6701
6702         mode = drm_mode_duplicate(dev, native_mode);
6703
6704         if (mode == NULL)
6705                 return NULL;
6706
6707         mode->hdisplay = hdisplay;
6708         mode->vdisplay = vdisplay;
6709         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6710         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6711
6712         return mode;
6714 }
6715
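/*
 * Supplement the probed modes with a set of common resolutions derived from
 * the native mode. Only sizes that fit within the native mode (and are not
 * the native mode itself) are added, and sizes already present in the
 * probed list are skipped.
 */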
6716 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6717                                                  struct drm_connector *connector)
6718 {
6719         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6720         struct drm_display_mode *mode = NULL;
6721         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6722         struct amdgpu_dm_connector *amdgpu_dm_connector =
6723                                 to_amdgpu_dm_connector(connector);
6724         int i;
6725         int n;
6726         struct mode_size {
6727                 char name[DRM_DISPLAY_MODE_LEN];
6728                 int w;
6729                 int h;
6730         } common_modes[] = {
6731                 {  "640x480",  640,  480},
6732                 {  "800x600",  800,  600},
6733                 { "1024x768", 1024,  768},
6734                 { "1280x720", 1280,  720},
6735                 { "1280x800", 1280,  800},
6736                 {"1280x1024", 1280, 1024},
6737                 { "1440x900", 1440,  900},
6738                 {"1680x1050", 1680, 1050},
6739                 {"1600x1200", 1600, 1200},
6740                 {"1920x1080", 1920, 1080},
6741                 {"1920x1200", 1920, 1200}
6742         };
6743
6744         n = ARRAY_SIZE(common_modes);
6745
6746         for (i = 0; i < n; i++) {
6747                 struct drm_display_mode *curmode = NULL;
6748                 bool mode_existed = false;
6749
6750                 if (common_modes[i].w > native_mode->hdisplay ||
6751                     common_modes[i].h > native_mode->vdisplay ||
6752                    (common_modes[i].w == native_mode->hdisplay &&
6753                     common_modes[i].h == native_mode->vdisplay))
6754                         continue;
6755
6756                 list_for_each_entry(curmode, &connector->probed_modes, head) {
6757                         if (common_modes[i].w == curmode->hdisplay &&
6758                             common_modes[i].h == curmode->vdisplay) {
6759                                 mode_existed = true;
6760                                 break;
6761                         }
6762                 }
6763
6764                 if (mode_existed)
6765                         continue;
6766
6767                 mode = amdgpu_dm_create_common_mode(encoder,
6768                                 common_modes[i].name, common_modes[i].w,
6769                                 common_modes[i].h);
6770                 drm_mode_probed_add(connector, mode);
6771                 amdgpu_dm_connector->num_modes++;
6772         }
6773 }
6774
6775 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6776                                               struct edid *edid)
6777 {
6778         struct amdgpu_dm_connector *amdgpu_dm_connector =
6779                         to_amdgpu_dm_connector(connector);
6780
6781         if (edid) {
6782                 /* empty probed_modes */
6783                 INIT_LIST_HEAD(&connector->probed_modes);
6784                 amdgpu_dm_connector->num_modes =
6785                                 drm_add_edid_modes(connector, edid);
6786
6787                 /* Sort the probed modes before calling
6788                  * amdgpu_dm_get_native_mode(), since an EDID can have
6789                  * more than one preferred mode. Modes that come later in
6790                  * the probed mode list could be of a higher, preferred
6791                  * resolution. For example, 3840x2160 in the base EDID
6792                  * preferred timing, and 4096x2160 as the preferred
6793                  * resolution in a DID extension block later.
6794                  */
6795                 drm_mode_sort(&connector->probed_modes);
6796                 amdgpu_dm_get_native_mode(connector);
6797         } else {
6798                 amdgpu_dm_connector->num_modes = 0;
6799         }
6800 }
6801
6802 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6803 {
6804         struct amdgpu_dm_connector *amdgpu_dm_connector =
6805                         to_amdgpu_dm_connector(connector);
6806         struct drm_encoder *encoder;
6807         struct edid *edid = amdgpu_dm_connector->edid;
6808
6809         encoder = amdgpu_dm_connector_to_encoder(connector);
6810
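        /* Without a valid EDID, fall back to the standard modes up to 640x480. */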
6811         if (!drm_edid_is_valid(edid)) {
6812                 amdgpu_dm_connector->num_modes =
6813                                 drm_add_modes_noedid(connector, 640, 480);
6814         } else {
6815                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6816                 amdgpu_dm_connector_add_common_modes(encoder, connector);
6817         }
6818         amdgpu_dm_fbc_init(connector);
6819
6820         return amdgpu_dm_connector->num_modes;
6821 }
6822
6823 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6824                                      struct amdgpu_dm_connector *aconnector,
6825                                      int connector_type,
6826                                      struct dc_link *link,
6827                                      int link_index)
6828 {
6829         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6830
6831         /*
6832          * Some of the properties below require access to state, like bpc.
6833          * Allocate some default initial connector state with our reset helper.
6834          */
6835         if (aconnector->base.funcs->reset)
6836                 aconnector->base.funcs->reset(&aconnector->base);
6837
6838         aconnector->connector_id = link_index;
6839         aconnector->dc_link = link;
6840         aconnector->base.interlace_allowed = false;
6841         aconnector->base.doublescan_allowed = false;
6842         aconnector->base.stereo_allowed = false;
6843         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6844         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6845         aconnector->audio_inst = -1;
6846         mutex_init(&aconnector->hpd_lock);
6847
6848         /*
6849          * Configure HPD hot plug support: connector->polled defaults to 0,
6850          * which means HPD hot plug is not supported.
6851          */
6852         switch (connector_type) {
6853         case DRM_MODE_CONNECTOR_HDMIA:
6854                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6855                 aconnector->base.ycbcr_420_allowed =
6856                         link->link_enc->features.hdmi_ycbcr420_supported;
6857                 break;
6858         case DRM_MODE_CONNECTOR_DisplayPort:
6859                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6860                 aconnector->base.ycbcr_420_allowed =
6861                         link->link_enc->features.dp_ycbcr420_supported;
6862                 break;
6863         case DRM_MODE_CONNECTOR_DVID:
6864                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6865                 break;
6866         default:
6867                 break;
6868         }
6869
6870         drm_object_attach_property(&aconnector->base.base,
6871                                 dm->ddev->mode_config.scaling_mode_property,
6872                                 DRM_MODE_SCALE_NONE);
6873
6874         drm_object_attach_property(&aconnector->base.base,
6875                                 adev->mode_info.underscan_property,
6876                                 UNDERSCAN_OFF);
6877         drm_object_attach_property(&aconnector->base.base,
6878                                 adev->mode_info.underscan_hborder_property,
6879                                 0);
6880         drm_object_attach_property(&aconnector->base.base,
6881                                 adev->mode_info.underscan_vborder_property,
6882                                 0);
6883
6884         if (!aconnector->mst_port)
6885                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6886
6887         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6888         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6889         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6890
6891         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6892             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6893                 drm_object_attach_property(&aconnector->base.base,
6894                                 adev->mode_info.abm_level_property, 0);
6895         }
6896
6897         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6898             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6899             connector_type == DRM_MODE_CONNECTOR_eDP) {
6900                 drm_object_attach_property(
6901                         &aconnector->base.base,
6902                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
6903
6904                 if (!aconnector->mst_port)
6905                         drm_connector_attach_vrr_capable_property(&aconnector->base);
6906
6907 #ifdef CONFIG_DRM_AMD_DC_HDCP
6908                 if (adev->dm.hdcp_workqueue)
6909                         drm_connector_attach_content_protection_property(&aconnector->base, true);
6910 #endif
6911         }
6912 }
6913
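/*
 * Illustrative sketch (not part of the driver): a typical caller of this
 * transfer hook is the DRM EDID helper, which submits a register-offset
 * write followed by a block read as a single msgs[] array, e.g.:
 *
 *      struct edid *edid = drm_get_edid(connector, &i2c->base);
 *
 * Both messages arrive here together and are translated into one
 * i2c_command, one payload per message, before being handed to
 * dc_submit_i2c().
 */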
6914 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6915                               struct i2c_msg *msgs, int num)
6916 {
6917         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6918         struct ddc_service *ddc_service = i2c->ddc_service;
6919         struct i2c_command cmd;
6920         int i;
6921         int result = -EIO;
6922
6923         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6924
6925         if (!cmd.payloads)
6926                 return result;
6927
6928         cmd.number_of_payloads = num;
6929         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6930         cmd.speed = 100;
6931
6932         for (i = 0; i < num; i++) {
6933                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6934                 cmd.payloads[i].address = msgs[i].addr;
6935                 cmd.payloads[i].length = msgs[i].len;
6936                 cmd.payloads[i].data = msgs[i].buf;
6937         }
6938
6939         if (dc_submit_i2c(
6940                         ddc_service->ctx->dc,
6941                         ddc_service->ddc_pin->hw_info.ddc_channel,
6942                         &cmd))
6943                 result = num;
6944
6945         kfree(cmd.payloads);
6946         return result;
6947 }
6948
6949 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6950 {
6951         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6952 }
6953
6954 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6955         .master_xfer = amdgpu_dm_i2c_xfer,
6956         .functionality = amdgpu_dm_i2c_func,
6957 };
6958
6959 static struct amdgpu_i2c_adapter *
6960 create_i2c(struct ddc_service *ddc_service,
6961            int link_index,
6962            int *res)
6963 {
6964         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6965         struct amdgpu_i2c_adapter *i2c;
6966
6967         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6968         if (!i2c)
6969                 return NULL;
6970         i2c->base.owner = THIS_MODULE;
6971         i2c->base.class = I2C_CLASS_DDC;
6972         i2c->base.dev.parent = &adev->pdev->dev;
6973         i2c->base.algo = &amdgpu_dm_i2c_algo;
6974         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6975         i2c_set_adapdata(&i2c->base, i2c);
6976         i2c->ddc_service = ddc_service;
6977         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6978
6979         return i2c;
6980 }
6981
6982
6983 /*
6984  * Note: this function assumes that dc_link_detect() was called for the
6985  * dc_link which will be represented by this aconnector.
6986  */
6987 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6988                                     struct amdgpu_dm_connector *aconnector,
6989                                     uint32_t link_index,
6990                                     struct amdgpu_encoder *aencoder)
6991 {
6992         int res = 0;
6993         int connector_type;
6994         struct dc *dc = dm->dc;
6995         struct dc_link *link = dc_get_link_at_index(dc, link_index);
6996         struct amdgpu_i2c_adapter *i2c;
6997
6998         link->priv = aconnector;
6999
7000         DRM_DEBUG_DRIVER("%s()\n", __func__);
7001
7002         i2c = create_i2c(link->ddc, link->link_index, &res);
7003         if (!i2c) {
7004                 DRM_ERROR("Failed to create i2c adapter data\n");
7005                 return -ENOMEM;
7006         }
7007
7008         aconnector->i2c = i2c;
7009         res = i2c_add_adapter(&i2c->base);
7010
7011         if (res) {
7012                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7013                 goto out_free;
7014         }
7015
7016         connector_type = to_drm_connector_type(link->connector_signal);
7017
7018         res = drm_connector_init_with_ddc(
7019                         dm->ddev,
7020                         &aconnector->base,
7021                         &amdgpu_dm_connector_funcs,
7022                         connector_type,
7023                         &i2c->base);
7024
7025         if (res) {
7026                 DRM_ERROR("connector_init failed\n");
7027                 aconnector->connector_id = -1;
7028                 goto out_free;
7029         }
7030
7031         drm_connector_helper_add(
7032                         &aconnector->base,
7033                         &amdgpu_dm_connector_helper_funcs);
7034
7035         amdgpu_dm_connector_init_helper(
7036                 dm,
7037                 aconnector,
7038                 connector_type,
7039                 link,
7040                 link_index);
7041
7042         drm_connector_attach_encoder(
7043                 &aconnector->base, &aencoder->base);
7044
7045         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7046             connector_type == DRM_MODE_CONNECTOR_eDP)
7047                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7048
7049 out_free:
7050         if (res) {
7051                 kfree(i2c);
7052                 aconnector->i2c = NULL;
7053         }
7054         return res;
7055 }
7056
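/*
 * Build a bitmask with one bit set per CRTC, i.e. (1 << num_crtc) - 1,
 * capped at the 6 CRTCs the display hardware supports.
 */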
7057 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7058 {
7059         switch (adev->mode_info.num_crtc) {
7060         case 1:
7061                 return 0x1;
7062         case 2:
7063                 return 0x3;
7064         case 3:
7065                 return 0x7;
7066         case 4:
7067                 return 0xf;
7068         case 5:
7069                 return 0x1f;
7070         case 6:
7071         default:
7072                 return 0x3f;
7073         }
7074 }
7075
7076 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7077                                   struct amdgpu_encoder *aencoder,
7078                                   uint32_t link_index)
7079 {
7080         struct amdgpu_device *adev = drm_to_adev(dev);
7081
7082         int res = drm_encoder_init(dev,
7083                                    &aencoder->base,
7084                                    &amdgpu_dm_encoder_funcs,
7085                                    DRM_MODE_ENCODER_TMDS,
7086                                    NULL);
7087
7088         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7089
7090         if (!res)
7091                 aencoder->encoder_id = link_index;
7092         else
7093                 aencoder->encoder_id = -1;
7094
7095         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7096
7097         return res;
7098 }
7099
7100 static void manage_dm_interrupts(struct amdgpu_device *adev,
7101                                  struct amdgpu_crtc *acrtc,
7102                                  bool enable)
7103 {
7104         /*
7105          * We have no guarantee that the frontend index maps to the same
7106          * backend index - some even map to more than one.
7107          *
7108          * TODO: Use a different interrupt or check DC itself for the mapping.
7109          */
7110         int irq_type =
7111                 amdgpu_display_crtc_idx_to_irq_type(
7112                         adev,
7113                         acrtc->crtc_id);
7114
7115         if (enable) {
7116                 drm_crtc_vblank_on(&acrtc->base);
7117                 amdgpu_irq_get(
7118                         adev,
7119                         &adev->pageflip_irq,
7120                         irq_type);
7121         } else {
7122
7123                 amdgpu_irq_put(
7124                         adev,
7125                         &adev->pageflip_irq,
7126                         irq_type);
7127                 drm_crtc_vblank_off(&acrtc->base);
7128         }
7129 }
7130
7131 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7132                                       struct amdgpu_crtc *acrtc)
7133 {
7134         int irq_type =
7135                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7136
7137         /*
7138          * This reads the current state for the IRQ and forcibly reapplies
7139          * the setting to hardware.
7140          */
7141         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7142 }
7143
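/*
 * Return true if the scaling mode or the effective underscan borders differ
 * between the old and new connector state, i.e. the stream scaling needs to
 * be recomputed.
 */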
7144 static bool
7145 is_scaling_state_different(const struct dm_connector_state *dm_state,
7146                            const struct dm_connector_state *old_dm_state)
7147 {
7148         if (dm_state->scaling != old_dm_state->scaling)
7149                 return true;
7150         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7151                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7152                         return true;
7153         } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7154                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7155                         return true;
7156         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7157                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7158                 return true;
7159         return false;
7160 }
7161
7162 #ifdef CONFIG_DRM_AMD_DC_HDCP
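/*
 * Normalize DRM content-protection state transitions and report whether the
 * HDCP engine needs to be engaged or torn down for this connector: only
 * transitions that require an enable or disable return true.
 */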
7163 static bool is_content_protection_different(struct drm_connector_state *state,
7164                                             const struct drm_connector_state *old_state,
7165                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7166 {
7167         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7168         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7169
7170         /* Handle: Type0/1 change */
7171         if (old_state->hdcp_content_type != state->hdcp_content_type &&
7172             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7173                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7174                 return true;
7175         }
7176
7177         /* CP is being re-enabled: ignore this transition.
7178          *
7179          * Handles:     ENABLED -> DESIRED
7180          */
7181         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7182             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7183                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7184                 return false;
7185         }
7186
7187         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
7188          *
7189          * Handles:     UNDESIRED -> ENABLED
7190          */
7191         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7192             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7193                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7194
7195         /* Check that something is connected/enabled; otherwise we would start
7196          * HDCP with nothing attached (hot-plug, headless S3, DPMS).
7197          *
7198          * Handles:     DESIRED -> DESIRED (Special case)
7199          */
7200         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7201             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7202                 dm_con_state->update_hdcp = false;
7203                 return true;
7204         }
7205
7206         /*
7207          * Handles:     UNDESIRED -> UNDESIRED
7208          *              DESIRED -> DESIRED
7209          *              ENABLED -> ENABLED
7210          */
7211         if (old_state->content_protection == state->content_protection)
7212                 return false;
7213
7214         /*
7215          * Handles:     UNDESIRED -> DESIRED
7216          *              DESIRED -> UNDESIRED
7217          *              ENABLED -> UNDESIRED
7218          */
7219         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7220                 return true;
7221
7222         /*
7223          * Handles:     DESIRED -> ENABLED
7224          */
7225         return false;
7226 }
7227
7228 #endif
7229 static void remove_stream(struct amdgpu_device *adev,
7230                           struct amdgpu_crtc *acrtc,
7231                           struct dc_stream_state *stream)
7232 {
7233         /* this is the update mode case */
7234
7235         acrtc->otg_inst = -1;
7236         acrtc->enabled = false;
7237 }
7238
7239 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7240                                struct dc_cursor_position *position)
7241 {
7242         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7243         int x, y;
7244         int xorigin = 0, yorigin = 0;
7245
7246         position->enable = false;
7247         position->x = 0;
7248         position->y = 0;
7249
7250         if (!crtc || !plane->state->fb)
7251                 return 0;
7252
7253         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7254             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7255                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7256                           __func__,
7257                           plane->state->crtc_w,
7258                           plane->state->crtc_h);
7259                 return -EINVAL;
7260         }
7261
7262         x = plane->state->crtc_x;
7263         y = plane->state->crtc_y;
7264
7265         if (x <= -amdgpu_crtc->max_cursor_width ||
7266             y <= -amdgpu_crtc->max_cursor_height)
7267                 return 0;
7268
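        /*
         * For negative coordinates, pin the cursor to the CRTC edge and shift
         * the hotspot instead, so the visible portion is still drawn correctly.
         */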
7269         if (x < 0) {
7270                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7271                 x = 0;
7272         }
7273         if (y < 0) {
7274                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7275                 y = 0;
7276         }
7277         position->enable = true;
7278         position->translate_by_source = true;
7279         position->x = x;
7280         position->y = y;
7281         position->x_hotspot = xorigin;
7282         position->y_hotspot = yorigin;
7283
7284         return 0;
7285 }
7286
7287 static void handle_cursor_update(struct drm_plane *plane,
7288                                  struct drm_plane_state *old_plane_state)
7289 {
7290         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7291         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7292         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7293         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7294         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7295         uint64_t address = afb ? afb->address : 0;
7296         struct dc_cursor_position position;
7297         struct dc_cursor_attributes attributes;
7298         int ret;
7299
7300         if (!plane->state->fb && !old_plane_state->fb)
7301                 return;
7302
7303         DRM_DEBUG_DRIVER("%s: crtc_id=%d with cursor size %dx%d\n",
7304                          __func__,
7305                          amdgpu_crtc->crtc_id,
7306                          plane->state->crtc_w,
7307                          plane->state->crtc_h);
7308
7309         ret = get_cursor_position(plane, crtc, &position);
7310         if (ret)
7311                 return;
7312
7313         if (!position.enable) {
7314                 /* turn off cursor */
7315                 if (crtc_state && crtc_state->stream) {
7316                         mutex_lock(&adev->dm.dc_lock);
7317                         dc_stream_set_cursor_position(crtc_state->stream,
7318                                                       &position);
7319                         mutex_unlock(&adev->dm.dc_lock);
7320                 }
7321                 return;
7322         }
7323
7324         amdgpu_crtc->cursor_width = plane->state->crtc_w;
7325         amdgpu_crtc->cursor_height = plane->state->crtc_h;
7326
7327         memset(&attributes, 0, sizeof(attributes));
7328         attributes.address.high_part = upper_32_bits(address);
7329         attributes.address.low_part  = lower_32_bits(address);
7330         attributes.width             = plane->state->crtc_w;
7331         attributes.height            = plane->state->crtc_h;
7332         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7333         attributes.rotation_angle    = 0;
7334         attributes.attribute_flags.value = 0;
7335
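        /* DC expects the pitch in pixels, while DRM framebuffers store it in bytes. */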
7336         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7337
7338         if (crtc_state->stream) {
7339                 mutex_lock(&adev->dm.dc_lock);
7340                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7341                                                          &attributes))
7342                         DRM_ERROR("DC failed to set cursor attributes\n");
7343
7344                 if (!dc_stream_set_cursor_position(crtc_state->stream,
7345                                                    &position))
7346                         DRM_ERROR("DC failed to set cursor position\n");
7347                 mutex_unlock(&adev->dm.dc_lock);
7348         }
7349 }
7350
7351 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7352 {
7353
7354         assert_spin_locked(&acrtc->base.dev->event_lock);
7355         WARN_ON(acrtc->event);
7356
7357         acrtc->event = acrtc->base.state->event;
7358
7359         /* Set the flip status */
7360         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7361
7362         /* Mark this event as consumed */
7363         acrtc->base.state->event = NULL;
7364
7365         DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7366                                                  acrtc->crtc_id);
7367 }
7368
7369 static void update_freesync_state_on_stream(
7370         struct amdgpu_display_manager *dm,
7371         struct dm_crtc_state *new_crtc_state,
7372         struct dc_stream_state *new_stream,
7373         struct dc_plane_state *surface,
7374         u32 flip_timestamp_in_us)
7375 {
7376         struct mod_vrr_params vrr_params;
7377         struct dc_info_packet vrr_infopacket = {0};
7378         struct amdgpu_device *adev = dm->adev;
7379         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7380         unsigned long flags;
7381
7382         if (!new_stream)
7383                 return;
7384
7385         /*
7386          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7387          * For now it's sufficient to just guard against these conditions.
7388          */
7389
7390         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7391                 return;
7392
7393         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7394         vrr_params = acrtc->dm_irq_params.vrr_params;
7395
7396         if (surface) {
7397                 mod_freesync_handle_preflip(
7398                         dm->freesync_module,
7399                         surface,
7400                         new_stream,
7401                         flip_timestamp_in_us,
7402                         &vrr_params);
7403
7404                 if (adev->family < AMDGPU_FAMILY_AI &&
7405                     amdgpu_dm_vrr_active(new_crtc_state)) {
7406                         mod_freesync_handle_v_update(dm->freesync_module,
7407                                                      new_stream, &vrr_params);
7408
7409                         /* Need to call this before the frame ends. */
7410                         dc_stream_adjust_vmin_vmax(dm->dc,
7411                                                    new_crtc_state->stream,
7412                                                    &vrr_params.adjust);
7413                 }
7414         }
7415
7416         mod_freesync_build_vrr_infopacket(
7417                 dm->freesync_module,
7418                 new_stream,
7419                 &vrr_params,
7420                 PACKET_TYPE_VRR,
7421                 TRANSFER_FUNC_UNKNOWN,
7422                 &vrr_infopacket);
7423
7424         new_crtc_state->freesync_timing_changed |=
7425                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7426                         &vrr_params.adjust,
7427                         sizeof(vrr_params.adjust)) != 0);
7428
7429         new_crtc_state->freesync_vrr_info_changed |=
7430                 (memcmp(&new_crtc_state->vrr_infopacket,
7431                         &vrr_infopacket,
7432                         sizeof(vrr_infopacket)) != 0);
7433
7434         acrtc->dm_irq_params.vrr_params = vrr_params;
7435         new_crtc_state->vrr_infopacket = vrr_infopacket;
7436
7437         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7438         new_stream->vrr_infopacket = vrr_infopacket;
7439
7440         if (new_crtc_state->freesync_vrr_info_changed)
7441                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
7442                               new_crtc_state->base.crtc->base.id,
7443                               (int)new_crtc_state->base.vrr_enabled,
7444                               (int)vrr_params.state);
7445
7446         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7447 }
7448
7449 static void update_stream_irq_parameters(
7450         struct amdgpu_display_manager *dm,
7451         struct dm_crtc_state *new_crtc_state)
7452 {
7453         struct dc_stream_state *new_stream = new_crtc_state->stream;
7454         struct mod_vrr_params vrr_params;
7455         struct mod_freesync_config config = new_crtc_state->freesync_config;
7456         struct amdgpu_device *adev = dm->adev;
7457         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7458         unsigned long flags;
7459
7460         if (!new_stream)
7461                 return;
7462
7463         /*
7464          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7465          * For now it's sufficient to just guard against these conditions.
7466          */
7467         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7468                 return;
7469
7470         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7471         vrr_params = acrtc->dm_irq_params.vrr_params;
7472
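        /*
         * VRR only becomes active when userspace enabled it on a connector
         * that reports FreeSync support with a valid refresh range; otherwise
         * mark the state unsupported so a fixed refresh rate is used.
         */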
7473         if (new_crtc_state->vrr_supported &&
7474             config.min_refresh_in_uhz &&
7475             config.max_refresh_in_uhz) {
7476                 config.state = new_crtc_state->base.vrr_enabled ?
7477                         VRR_STATE_ACTIVE_VARIABLE :
7478                         VRR_STATE_INACTIVE;
7479         } else {
7480                 config.state = VRR_STATE_UNSUPPORTED;
7481         }
7482
7483         mod_freesync_build_vrr_params(dm->freesync_module,
7484                                       new_stream,
7485                                       &config, &vrr_params);
7486
7487         new_crtc_state->freesync_timing_changed |=
7488                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7489                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7490
7491         new_crtc_state->freesync_config = config;
7492         /* Copy state for access from DM IRQ handler */
7493         acrtc->dm_irq_params.freesync_config = config;
7494         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7495         acrtc->dm_irq_params.vrr_params = vrr_params;
7496         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7497 }
7498
7499 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7500                                             struct dm_crtc_state *new_state)
7501 {
7502         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7503         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7504
7505         if (!old_vrr_active && new_vrr_active) {
7506                 /* Transition VRR inactive -> active:
7507                  * While VRR is active, we must not disable the vblank irq, as a
7508                  * re-enable after a disable would compute bogus vblank/pflip
7509                  * timestamps if it happened inside the display front-porch.
7510                  *
7511                  * We also need vupdate irq for the actual core vblank handling
7512                  * at end of vblank.
7513                  */
7514                 dm_set_vupdate_irq(new_state->base.crtc, true);
7515                 drm_crtc_vblank_get(new_state->base.crtc);
7516                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7517                                  __func__, new_state->base.crtc->base.id);
7518         } else if (old_vrr_active && !new_vrr_active) {
7519                 /* Transition VRR active -> inactive:
7520                  * Allow vblank irq disable again for fixed refresh rate.
7521                  */
7522                 dm_set_vupdate_irq(new_state->base.crtc, false);
7523                 drm_crtc_vblank_put(new_state->base.crtc);
7524                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7525                                  __func__, new_state->base.crtc->base.id);
7526         }
7527 }
7528
7529 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7530 {
7531         struct drm_plane *plane;
7532         struct drm_plane_state *old_plane_state, *new_plane_state;
7533         int i;
7534
7535         /*
7536          * TODO: Make this per-stream so we don't issue redundant updates for
7537          * commits with multiple streams.
7538          */
7539         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7540                                        new_plane_state, i)
7541                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7542                         handle_cursor_update(plane, old_plane_state);
7543 }
7544
7545 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7546                                     struct dc_state *dc_state,
7547                                     struct drm_device *dev,
7548                                     struct amdgpu_display_manager *dm,
7549                                     struct drm_crtc *pcrtc,
7550                                     bool wait_for_vblank)
7551 {
7552         uint32_t i;
7553         uint64_t timestamp_ns;
7554         struct drm_plane *plane;
7555         struct drm_plane_state *old_plane_state, *new_plane_state;
7556         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7557         struct drm_crtc_state *new_pcrtc_state =
7558                         drm_atomic_get_new_crtc_state(state, pcrtc);
7559         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7560         struct dm_crtc_state *dm_old_crtc_state =
7561                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7562         int planes_count = 0, vpos, hpos;
7563         long r;
7564         unsigned long flags;
7565         struct amdgpu_bo *abo;
7566         uint32_t target_vblank, last_flip_vblank;
7567         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7568         bool pflip_present = false;
7569         struct {
7570                 struct dc_surface_update surface_updates[MAX_SURFACES];
7571                 struct dc_plane_info plane_infos[MAX_SURFACES];
7572                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7573                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7574                 struct dc_stream_update stream_update;
7575         } *bundle;
7576
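        /*
         * The update bundle holds per-plane arrays sized for MAX_SURFACES and
         * is too large for the stack, hence the heap allocation below.
         */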
7577         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7578
7579         if (!bundle) {
7580                 dm_error("Failed to allocate update bundle\n");
7581                 goto cleanup;
7582         }
7583
7584         /*
7585          * Disable the cursor first if we're disabling all the planes.
7586          * It'll remain on the screen after the planes are re-enabled
7587          * if we don't.
7588          */
7589         if (acrtc_state->active_planes == 0)
7590                 amdgpu_dm_commit_cursors(state);
7591
7592         /* update planes when needed */
7593         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7594                 struct drm_crtc *crtc = new_plane_state->crtc;
7595                 struct drm_crtc_state *new_crtc_state;
7596                 struct drm_framebuffer *fb = new_plane_state->fb;
7597                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7598                 bool plane_needs_flip;
7599                 struct dc_plane_state *dc_plane;
7600                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7601
7602                 /* Cursor plane is handled after stream updates */
7603                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7604                         continue;
7605
7606                 if (!fb || !crtc || pcrtc != crtc)
7607                         continue;
7608
7609                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7610                 if (!new_crtc_state->active)
7611                         continue;
7612
7613                 dc_plane = dm_new_plane_state->dc_state;
7614
7615                 bundle->surface_updates[planes_count].surface = dc_plane;
7616                 if (new_pcrtc_state->color_mgmt_changed) {
7617                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7618                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7619                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7620                 }
7621
7622                 fill_dc_scaling_info(new_plane_state,
7623                                      &bundle->scaling_infos[planes_count]);
7624
7625                 bundle->surface_updates[planes_count].scaling_info =
7626                         &bundle->scaling_infos[planes_count];
7627
7628                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7629
7630                 pflip_present = pflip_present || plane_needs_flip;
7631
7632                 if (!plane_needs_flip) {
7633                         planes_count += 1;
7634                         continue;
7635                 }
7636
7637                 abo = gem_to_amdgpu_bo(fb->obj[0]);
7638
7639                 /*
7640                  * Wait for all fences on this FB. Do limited wait to avoid
7641                  * deadlock during GPU reset when this fence will not signal
7642                  * but we hold reservation lock for the BO.
7643                  */
7644                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7645                                                         false,
7646                                                         msecs_to_jiffies(5000));
7647                 if (unlikely(r <= 0))
7648                         DRM_ERROR("Waiting for fences timed out!\n");
7649
7650                 fill_dc_plane_info_and_addr(
7651                         dm->adev, new_plane_state,
7652                         afb->tiling_flags,
7653                         &bundle->plane_infos[planes_count],
7654                         &bundle->flip_addrs[planes_count].address,
7655                         afb->tmz_surface, false);
7656
7657                 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7658                                  new_plane_state->plane->index,
7659                                  bundle->plane_infos[planes_count].dcc.enable);
7660
7661                 bundle->surface_updates[planes_count].plane_info =
7662                         &bundle->plane_infos[planes_count];
7663
7664                 /*
7665                  * Only allow immediate flips for fast updates that don't
7666                  * change FB pitch, DCC state, rotation or mirroring.
7667                  */
7668                 bundle->flip_addrs[planes_count].flip_immediate =
7669                         crtc->state->async_flip &&
7670                         acrtc_state->update_type == UPDATE_TYPE_FAST;
7671
7672                 timestamp_ns = ktime_get_ns();
7673                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7674                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7675                 bundle->surface_updates[planes_count].surface = dc_plane;
7676
7677                 if (!bundle->surface_updates[planes_count].surface) {
7678                         DRM_ERROR("No surface for CRTC: id=%d\n",
7679                                         acrtc_attach->crtc_id);
7680                         continue;
7681                 }
7682
7683                 if (plane == pcrtc->primary)
7684                         update_freesync_state_on_stream(
7685                                 dm,
7686                                 acrtc_state,
7687                                 acrtc_state->stream,
7688                                 dc_plane,
7689                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7690
7691                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7692                                  __func__,
7693                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7694                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7695
7696                 planes_count += 1;
7697
7698         }
7699
7700         if (pflip_present) {
7701                 if (!vrr_active) {
7702                         /* Use old throttling in non-vrr fixed refresh rate mode
7703                          * to keep flip scheduling based on target vblank counts
7704                          * working in a backwards compatible way, e.g., for
7705                          * clients using the GLX_OML_sync_control extension or
7706                          * DRI3/Present extension with defined target_msc.
7707                          */
7708                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7709                 } else {
7710                         /*
7711                          * For variable refresh rate mode only:
7712                          * Get vblank of last completed flip to avoid > 1 vrr
7713                          * flips per video frame by use of throttling, but allow
7714                          * flip programming anywhere in the possibly large
7715                          * variable vrr vblank interval for fine-grained flip
7716                          * timing control and more opportunity to avoid stutter
7717                          * on late submission of flips.
7718                          */
7719                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7720                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7721                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7722                 }
7723
7724                 target_vblank = last_flip_vblank + wait_for_vblank;
7725
7726                 /*
7727                  * Wait until we're out of the vertical blank period before the one
7728                  * targeted by the flip
7729                  */
7730                 while ((acrtc_attach->enabled &&
7731                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7732                                                             0, &vpos, &hpos, NULL,
7733                                                             NULL, &pcrtc->hwmode)
7734                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7735                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7736                         (int)(target_vblank -
7737                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7738                         usleep_range(1000, 1100);
7739                 }
7740
7741                 /*
7742                  * Prepare the flip event for the pageflip interrupt to handle.
7743                  *
7744                  * This only works in the case where we've already turned on the
7745                  * appropriate hardware blocks (eg. HUBP) so in the transition case
7746                  * from 0 -> n planes we have to skip a hardware generated event
7747                  * and rely on sending it from software.
7748                  */
7749                 if (acrtc_attach->base.state->event &&
7750                     acrtc_state->active_planes > 0) {
7751                         drm_crtc_vblank_get(pcrtc);
7752
7753                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7754
7755                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7756                         prepare_flip_isr(acrtc_attach);
7757
7758                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7759                 }
7760
7761                 if (acrtc_state->stream) {
7762                         if (acrtc_state->freesync_vrr_info_changed)
7763                                 bundle->stream_update.vrr_infopacket =
7764                                         &acrtc_state->stream->vrr_infopacket;
7765                 }
7766         }
7767
7768         /* Update the planes if changed or disable if we don't have any. */
7769         if ((planes_count || acrtc_state->active_planes == 0) &&
7770                 acrtc_state->stream) {
7771                 bundle->stream_update.stream = acrtc_state->stream;
7772                 if (new_pcrtc_state->mode_changed) {
7773                         bundle->stream_update.src = acrtc_state->stream->src;
7774                         bundle->stream_update.dst = acrtc_state->stream->dst;
7775                 }
7776
7777                 if (new_pcrtc_state->color_mgmt_changed) {
7778                         /*
7779                          * TODO: This isn't fully correct since we've actually
7780                          * already modified the stream in place.
7781                          */
7782                         bundle->stream_update.gamut_remap =
7783                                 &acrtc_state->stream->gamut_remap_matrix;
7784                         bundle->stream_update.output_csc_transform =
7785                                 &acrtc_state->stream->csc_color_matrix;
7786                         bundle->stream_update.out_transfer_func =
7787                                 acrtc_state->stream->out_transfer_func;
7788                 }
7789
7790                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7791                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7792                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
7793
7794                 /*
7795                  * If FreeSync state on the stream has changed then we need to
7796                  * re-adjust the min/max bounds now that DC doesn't handle this
7797                  * as part of commit.
7798                  */
7799                 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7800                     amdgpu_dm_vrr_active(acrtc_state)) {
7801                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7802                         dc_stream_adjust_vmin_vmax(
7803                                 dm->dc, acrtc_state->stream,
7804                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7805                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7806                 }
7807                 mutex_lock(&dm->dc_lock);
7808                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7809                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
7810                         amdgpu_dm_psr_disable(acrtc_state->stream);
7811
7812                 dc_commit_updates_for_stream(dm->dc,
7813                                                      bundle->surface_updates,
7814                                                      planes_count,
7815                                                      acrtc_state->stream,
7816                                                      &bundle->stream_update,
7817                                                      dc_state);
7818
7819                 /*
7820                  * Enable or disable the interrupts on the backend.
7821                  *
7822                  * Most pipes are put into power gating when unused.
7823                  *
7824                  * When power gating is enabled on a pipe we lose the
7825                  * interrupt enablement state when power gating is disabled.
7826                  *
7827                  * So we need to update the IRQ control state in hardware
7828                  * whenever the pipe turns on (since it could be previously
7829                  * power gated) or off (since some pipes can't be power gated
7830                  * on some ASICs).
7831                  */
7832                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7833                         dm_update_pflip_irq_state(drm_to_adev(dev),
7834                                                   acrtc_attach);
7835
7836                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7837                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7838                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7839                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
7840                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7841                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7842                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7843                         amdgpu_dm_psr_enable(acrtc_state->stream);
7844                 }
7845
7846                 mutex_unlock(&dm->dc_lock);
7847         }
7848
7849         /*
7850          * Update cursor state *after* programming all the planes.
7851          * This avoids redundant programming in the case where we're going
7852          * to be disabling a single plane - those pipes are being disabled.
7853          */
7854         if (acrtc_state->active_planes)
7855                 amdgpu_dm_commit_cursors(state);
7856
7857 cleanup:
7858         kfree(bundle);
7859 }
7860
7861 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7862                                    struct drm_atomic_state *state)
7863 {
7864         struct amdgpu_device *adev = drm_to_adev(dev);
7865         struct amdgpu_dm_connector *aconnector;
7866         struct drm_connector *connector;
7867         struct drm_connector_state *old_con_state, *new_con_state;
7868         struct drm_crtc_state *new_crtc_state;
7869         struct dm_crtc_state *new_dm_crtc_state;
7870         const struct dc_stream_status *status;
7871         int i, inst;
7872
7873         /* Notify audio device removals. */
7874         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7875                 if (old_con_state->crtc != new_con_state->crtc) {
7876                         /* CRTC changes require notification. */
7877                         goto notify;
7878                 }
7879
7880                 if (!new_con_state->crtc)
7881                         continue;
7882
7883                 new_crtc_state = drm_atomic_get_new_crtc_state(
7884                         state, new_con_state->crtc);
7885
7886                 if (!new_crtc_state)
7887                         continue;
7888
7889                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7890                         continue;
7891
7892         notify:
7893                 aconnector = to_amdgpu_dm_connector(connector);
7894
7895                 mutex_lock(&adev->dm.audio_lock);
7896                 inst = aconnector->audio_inst;
7897                 aconnector->audio_inst = -1;
7898                 mutex_unlock(&adev->dm.audio_lock);
7899
7900                 amdgpu_dm_audio_eld_notify(adev, inst);
7901         }
7902
7903         /* Notify audio device additions. */
7904         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7905                 if (!new_con_state->crtc)
7906                         continue;
7907
7908                 new_crtc_state = drm_atomic_get_new_crtc_state(
7909                         state, new_con_state->crtc);
7910
7911                 if (!new_crtc_state)
7912                         continue;
7913
7914                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7915                         continue;
7916
7917                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7918                 if (!new_dm_crtc_state->stream)
7919                         continue;
7920
7921                 status = dc_stream_get_status(new_dm_crtc_state->stream);
7922                 if (!status)
7923                         continue;
7924
7925                 aconnector = to_amdgpu_dm_connector(connector);
7926
7927                 mutex_lock(&adev->dm.audio_lock);
7928                 inst = status->audio_inst;
7929                 aconnector->audio_inst = inst;
7930                 mutex_unlock(&adev->dm.audio_lock);
7931
7932                 amdgpu_dm_audio_eld_notify(adev, inst);
7933         }
7934 }
7935
7936 /*
7937  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7938  * @crtc_state: the DRM CRTC state
7939  * @stream_state: the DC stream state.
7940  *
7941  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7942  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7943  */
7944 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7945                                                 struct dc_stream_state *stream_state)
7946 {
7947         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7948 }
7949
7950 /**
7951  * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
7952  * @state: The atomic state to commit
7953  *
7954  * This will tell DC to commit the constructed DC state from atomic_check,
7955  * programming the hardware. Any failure here implies a hardware failure, since
7956  * atomic check should have filtered out anything non-kosher.
7957  */
7958 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7959 {
7960         struct drm_device *dev = state->dev;
7961         struct amdgpu_device *adev = drm_to_adev(dev);
7962         struct amdgpu_display_manager *dm = &adev->dm;
7963         struct dm_atomic_state *dm_state;
7964         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7965         uint32_t i, j;
7966         struct drm_crtc *crtc;
7967         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7968         unsigned long flags;
7969         bool wait_for_vblank = true;
7970         struct drm_connector *connector;
7971         struct drm_connector_state *old_con_state, *new_con_state;
7972         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7973         int crtc_disable_count = 0;
7974         bool mode_set_reset_required = false;
7975
7976         trace_amdgpu_dm_atomic_commit_tail_begin(state);
7977
7978         drm_atomic_helper_update_legacy_modeset_state(dev, state);
7979
7980         dm_state = dm_atomic_get_new_state(state);
7981         if (dm_state && dm_state->context) {
7982                 dc_state = dm_state->context;
7983         } else {
7984                 /* No state changes, retain current state. */
7985                 dc_state_temp = dc_create_state(dm->dc);
7986                 ASSERT(dc_state_temp);
7987                 dc_state = dc_state_temp;
7988                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7989         }
7990
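        /*
         * Disable interrupts and drop the reference on the old stream for
         * every CRTC that is being turned off or fully modeset, before the
         * new state is committed to DC.
         */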
7991         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
7992                                        new_crtc_state, i) {
7993                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7994
7995                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7996
7997                 if (old_crtc_state->active &&
7998                     (!new_crtc_state->active ||
7999                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8000                         manage_dm_interrupts(adev, acrtc, false);
8001                         dc_stream_release(dm_old_crtc_state->stream);
8002                 }
8003         }
8004
8005         drm_atomic_helper_calc_timestamping_constants(state);
8006
8007         /* update changed items */
8008         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8009                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8010
8011                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8012                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8013
8014                 DRM_DEBUG_DRIVER(
8015                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8016                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8017                         "connectors_changed:%d\n",
8018                         acrtc->crtc_id,
8019                         new_crtc_state->enable,
8020                         new_crtc_state->active,
8021                         new_crtc_state->planes_changed,
8022                         new_crtc_state->mode_changed,
8023                         new_crtc_state->active_changed,
8024                         new_crtc_state->connectors_changed);
8025
8026                 /* Disable cursor if disabling crtc */
8027                 if (old_crtc_state->active && !new_crtc_state->active) {
8028                         struct dc_cursor_position position;
8029
8030                         memset(&position, 0, sizeof(position));
8031                         mutex_lock(&dm->dc_lock);
8032                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8033                         mutex_unlock(&dm->dc_lock);
8034                 }
8035
8036                 /* Copy all transient state flags into dc state */
8037                 if (dm_new_crtc_state->stream) {
8038                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8039                                                             dm_new_crtc_state->stream);
8040                 }
8041
8042                 /* Handle the headless hotplug case, updating new_state and
8043                  * the aconnector as needed.
8044                  */
8045
8046                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8047
8048                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8049
8050                         if (!dm_new_crtc_state->stream) {
8051                                 /*
8052                                  * This can happen because of issues with
8053                                  * userspace notification delivery: userspace
8054                                  * tries to set a mode on a display that is
8055                                  * in fact already disconnected, so dc_sink
8056                                  * is NULL on the aconnector. We expect a
8057                                  * mode reset to come soon.
8058                                  *
8059                                  * This can also happen when an unplug
8060                                  * occurs during the resume sequence.
8061                                  *
8062                                  * In either case, pretend we still have a
8063                                  * sink to keep the pipe running, so that the
8064                                  * hw state stays consistent with the sw state.
8065                                  */
8066                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8067                                                 __func__, acrtc->base.base.id);
8068                                 continue;
8069                         }
8070
8071                         if (dm_old_crtc_state->stream)
8072                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8073
8074                         pm_runtime_get_noresume(dev->dev);
8075
8076                         acrtc->enabled = true;
8077                         acrtc->hw_mode = new_crtc_state->mode;
8078                         crtc->hwmode = new_crtc_state->mode;
8079                         mode_set_reset_required = true;
8080                 } else if (modereset_required(new_crtc_state)) {
8081                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8082                         /* i.e. reset mode */
8083                         if (dm_old_crtc_state->stream)
8084                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8085                         mode_set_reset_required = true;
8086                 }
8087         } /* for_each_crtc_in_state() */
8088
8089         if (dc_state) {
8090                 /* If there is a mode set or reset, disable eDP PSR */
8091                 if (mode_set_reset_required)
8092                         amdgpu_dm_psr_disable_all(dm);
8093
8094                 dm_enable_per_frame_crtc_master_sync(dc_state);
8095                 mutex_lock(&dm->dc_lock);
8096                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8097                 mutex_unlock(&dm->dc_lock);
8098         }
8099
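        /*
         * Cache the OTG (output timing generator) instance that DC assigned
         * to each active stream on its amdgpu_crtc.
         */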
8100         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8101                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8102
8103                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8104
8105                 if (dm_new_crtc_state->stream) {
8106                         const struct dc_stream_status *status =
8107                                         dc_stream_get_status(dm_new_crtc_state->stream);
8108
8109                         if (!status)
8110                                 status = dc_stream_get_status_from_state(dc_state,
8111                                                                          dm_new_crtc_state->stream);
8112                         if (!status)
8113                                 DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
8114                         else
8115                                 acrtc->otg_inst = status->primary_otg_inst;
8116                 }
8117         }
8118 #ifdef CONFIG_DRM_AMD_DC_HDCP
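        /*
         * Update HDCP state per connector: reset encryption for displays
         * whose stream was torn down, and queue an update wherever the
         * requested content protection state changed.
         */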
8119         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8120                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8121                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8122                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8123
8124                 new_crtc_state = NULL;
8125
8126                 if (acrtc)
8127                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8128
8129                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8130
8131                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8132                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8133                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8134                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8135                         dm_new_con_state->update_hdcp = true;
8136                         continue;
8137                 }
8138
8139                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8140                         hdcp_update_display(
8141                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8142                                 new_con_state->hdcp_content_type,
8143                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8145         }
8146 #endif
8147
8148         /* Handle connector state changes */
8149         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8150                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8151                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8152                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8153                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8154                 struct dc_stream_update stream_update;
8155                 struct dc_info_packet hdr_packet;
8156                 struct dc_stream_status *status = NULL;
8157                 bool abm_changed, hdr_changed, scaling_changed;
8158
8159                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8160                 memset(&stream_update, 0, sizeof(stream_update));
8161
8162                 if (acrtc) {
8163                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8164                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8165                 }
8166
8167                 /* Skip any modesets/resets */
8168                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8169                         continue;
8170
8171                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8172                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8173
8174                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8175                                                              dm_old_con_state);
8176
8177                 abm_changed = dm_new_crtc_state->abm_level !=
8178                               dm_old_crtc_state->abm_level;
8179
8180                 hdr_changed =
8181                         is_hdr_metadata_different(old_con_state, new_con_state);
8182
8183                 if (!scaling_changed && !abm_changed && !hdr_changed)
8184                         continue;
8185
8186                 stream_update.stream = dm_new_crtc_state->stream;
8187                 if (scaling_changed) {
8188                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8189                                         dm_new_con_state, dm_new_crtc_state->stream);
8190
8191                         stream_update.src = dm_new_crtc_state->stream->src;
8192                         stream_update.dst = dm_new_crtc_state->stream->dst;
8193                 }
8194
8195                 if (abm_changed) {
8196                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8197
8198                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8199                 }
8200
8201                 if (hdr_changed) {
8202                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8203                         stream_update.hdr_static_metadata = &hdr_packet;
8204                 }
8205
8206                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8207                 if (WARN_ON(!status) || WARN_ON(!status->plane_count))
8208                         continue;
8209
8210                 /*
8211                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8212                  * Here we create an empty update on each plane.
8213                  * To fix this, DC should permit updating only stream properties.
8214                  */
8215                 for (j = 0; j < status->plane_count; j++)
8216                         dummy_updates[j].surface = status->plane_states[0];
8217
8219                 mutex_lock(&dm->dc_lock);
8220                 dc_commit_updates_for_stream(dm->dc,
8221                                                      dummy_updates,
8222                                                      status->plane_count,
8223                                                      dm_new_crtc_state->stream,
8224                                                      &stream_update,
8225                                                      dc_state);
8226                 mutex_unlock(&dm->dc_lock);
8227         }
8228
8229         /* Count number of newly disabled CRTCs for dropping PM refs later. */
8230         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8231                                       new_crtc_state, i) {
8232                 if (old_crtc_state->active && !new_crtc_state->active)
8233                         crtc_disable_count++;
8234
8235                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8236                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8237
8238                 /* Update the freesync config on the crtc state and the irq parameters */
8239                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8240
8241                 /* Handle vrr on->off / off->on transitions */
8242                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8243                                                 dm_new_crtc_state);
8244         }
8245
8246         /*
8247          * Enable interrupts for CRTCs that are newly enabled or went through
8248          * a modeset. It was intentionally deferred until after the front end
8249          * state was modified to wait until the OTG was on and so the IRQ
8250          * handlers didn't access stale or invalid state.
8251          */
8252         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8253                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8254
8255                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8256
8257                 if (new_crtc_state->active &&
8258                     (!old_crtc_state->active ||
8259                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8260                         dc_stream_retain(dm_new_crtc_state->stream);
8261                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8262                         manage_dm_interrupts(adev, acrtc, true);
8263
8264 #ifdef CONFIG_DEBUG_FS
8265                         /*
8266                          * Frontend may have changed so reapply the CRC capture
8267                          * settings for the stream.
8268                          */
8269                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8270
8271                         if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8272                                 amdgpu_dm_crtc_configure_crc_source(
8273                                         crtc, dm_new_crtc_state,
8274                                         dm_new_crtc_state->crc_src);
8275                         }
8276 #endif
8277                 }
8278         }
8279
8280         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8281                 if (new_crtc_state->async_flip)
8282                         wait_for_vblank = false;
8283
8284         /* Update planes when needed, per crtc */
8285         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8286                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8287
8288                 if (dm_new_crtc_state->stream)
8289                         amdgpu_dm_commit_planes(state, dc_state, dev,
8290                                                 dm, crtc, wait_for_vblank);
8291         }
8292
8293         /* Update audio instances for each connector. */
8294         amdgpu_dm_commit_audio(dev, state);
8295
8296         /*
8297          * Send a vblank event for every event not handled in the flip path,
8298          * and mark each event consumed for drm_atomic_helper_commit_hw_done().
8299          */
8300         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8301         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8302
8303                 if (new_crtc_state->event)
8304                         drm_send_event_locked(dev, &new_crtc_state->event->base);
8305
8306                 new_crtc_state->event = NULL;
8307         }
8308         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8309
8310         /* Signal HW programming completion */
8311         drm_atomic_helper_commit_hw_done(state);
8312
8313         if (wait_for_vblank)
8314                 drm_atomic_helper_wait_for_flip_done(dev, state);
8315
8316         drm_atomic_helper_cleanup_planes(dev, state);
8317
8318         /* return the stolen vga memory back to VRAM */
8319         if (!adev->mman.keep_stolen_vga_memory)
8320                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8321         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8322
8323         /*
8324          * Finally, drop a runtime PM reference for each newly disabled CRTC,
8325          * so we can put the GPU into runtime suspend if we're not driving any
8326          * displays anymore.
8327          */
8328         for (i = 0; i < crtc_disable_count; i++)
8329                 pm_runtime_put_autosuspend(dev->dev);
8330         pm_runtime_mark_last_busy(dev->dev);
8331
8332         if (dc_state_temp)
8333                 dc_release_state(dc_state_temp);
8334 }
8335
8336
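/*
 * dm_force_atomic_commit - programmatically re-commit a connector's current
 * configuration.
 *
 * Builds a minimal atomic state containing the connector, its CRTC and the
 * CRTC's primary plane, forces mode_changed so the full modeset path runs,
 * and commits it. Used to restore hardware state when no modeset request
 * arrives from userspace after a hotplug event.
 */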
8337 static int dm_force_atomic_commit(struct drm_connector *connector)
8338 {
8339         int ret = 0;
8340         struct drm_device *ddev = connector->dev;
8341         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8342         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8343         struct drm_plane *plane = disconnected_acrtc->base.primary;
8344         struct drm_connector_state *conn_state;
8345         struct drm_crtc_state *crtc_state;
8346         struct drm_plane_state *plane_state;
8347
8348         if (!state)
8349                 return -ENOMEM;
8350
8351         state->acquire_ctx = ddev->mode_config.acquire_ctx;
8352
8353         /* Construct an atomic state to restore the previous display settings */
8354
8355         /* Attach the connector to drm_atomic_state */
8358         conn_state = drm_atomic_get_connector_state(state, connector);
8359
8360         ret = PTR_ERR_OR_ZERO(conn_state);
8361         if (ret)
8362                 goto err;
8363
8364         /* Attach the CRTC to drm_atomic_state */
8365         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8366
8367         ret = PTR_ERR_OR_ZERO(crtc_state);
8368         if (ret)
8369                 goto err;
8370
8371         /* force a restore */
8372         crtc_state->mode_changed = true;
8373
8374         /* Attach plane to drm_atomic_state */
8375         plane_state = drm_atomic_get_plane_state(state, plane);
8376
8377         ret = PTR_ERR_OR_ZERO(plane_state);
8378         if (ret)
8379                 goto err;
8380
8382         /* Call commit internally with the state we just constructed */
8383         ret = drm_atomic_commit(state);
8384         if (!ret)
8385                 return 0;
8386
8387 err:
8388         DRM_ERROR("Restoring old state failed with %i\n", ret);
8389         drm_atomic_state_put(state);
8390
8391         return ret;
8392 }
8393
8394 /*
8395  * This function handles all cases where a set mode does not come upon hotplug.
8396  * This includes when a display is unplugged and then plugged back into the
8397  * same port, and when running without usermode desktop manager support.
8398  */
8399 void dm_restore_drm_connector_state(struct drm_device *dev,
8400                                     struct drm_connector *connector)
8401 {
8402         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8403         struct amdgpu_crtc *disconnected_acrtc;
8404         struct dm_crtc_state *acrtc_state;
8405
8406         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8407                 return;
8408
8409         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8410         if (!disconnected_acrtc)
8411                 return;
8412
8413         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8414         if (!acrtc_state->stream)
8415                 return;
8416
8417         /*
8418          * If the previous sink has not been released and differs from the
8419          * current one, we deduce that we cannot rely on a usermode call to
8420          * turn the display on, so we do it here.
8421          */
8422         if (acrtc_state->stream->sink != aconnector->dc_sink)
8423                 dm_force_atomic_commit(&aconnector->base);
8424 }
8425
8426 /*
8427  * Grabs all modesetting locks to serialize against any blocking commits,
8428  * and waits for completion of all non-blocking commits.
8429  */
8430 static int do_aquire_global_lock(struct drm_device *dev,
8431                                  struct drm_atomic_state *state)
8432 {
8433         struct drm_crtc *crtc;
8434         struct drm_crtc_commit *commit;
8435         long ret;
8436
8437         /*
8438          * Adding all modeset locks to acquire_ctx ensures that
8439          * when the framework releases it, the extra locks we are
8440          * taking here will get released too.
8441          */
8442         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8443         if (ret)
8444                 return ret;
8445
8446         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8447                 spin_lock(&crtc->commit_lock);
8448                 commit = list_first_entry_or_null(&crtc->commit_list,
8449                                 struct drm_crtc_commit, commit_entry);
8450                 if (commit)
8451                         drm_crtc_commit_get(commit);
8452                 spin_unlock(&crtc->commit_lock);
8453
8454                 if (!commit)
8455                         continue;
8456
8457                 /*
8458                  * Make sure all pending HW programming completed and
8459                  * page flips done
8460                  */
8461                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8462
8463                 if (ret > 0)
8464                         ret = wait_for_completion_interruptible_timeout(
8465                                         &commit->flip_done, 10*HZ);
8466
8467                 if (ret == 0)
8468                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
8469                                   crtc->base.id, crtc->name);
8470
8471                 drm_crtc_commit_put(commit);
8472         }
8473
8474         return ret < 0 ? ret : 0;
8475 }
8476
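/*
 * Derive the FreeSync/VRR configuration for a CRTC from its connector state:
 * VRR is supported when the connector is freesync capable and the mode's
 * refresh rate lies within the connector's [min_vfreq, max_vfreq] range.
 */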
8477 static void get_freesync_config_for_crtc(
8478         struct dm_crtc_state *new_crtc_state,
8479         struct dm_connector_state *new_con_state)
8480 {
8481         struct mod_freesync_config config = {0};
8482         struct amdgpu_dm_connector *aconnector =
8483                         to_amdgpu_dm_connector(new_con_state->base.connector);
8484         struct drm_display_mode *mode = &new_crtc_state->base.mode;
8485         int vrefresh = drm_mode_vrefresh(mode);
8486
8487         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8488                                         vrefresh >= aconnector->min_vfreq &&
8489                                         vrefresh <= aconnector->max_vfreq;
8490
8491         if (new_crtc_state->vrr_supported) {
8492                 new_crtc_state->stream->ignore_msa_timing_param = true;
8493                 config.state = new_crtc_state->base.vrr_enabled ?
8494                                 VRR_STATE_ACTIVE_VARIABLE :
8495                                 VRR_STATE_INACTIVE;
8496                 config.min_refresh_in_uhz =
8497                                 aconnector->min_vfreq * 1000000;
8498                 config.max_refresh_in_uhz =
8499                                 aconnector->max_vfreq * 1000000;
8500                 config.vsif_supported = true;
8501                 config.btr = true;
8502         }
8503
8504         new_crtc_state->freesync_config = config;
8505 }
8506
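/* Clear the VRR state for a CRTC whose stream is being removed. */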
8507 static void reset_freesync_config_for_crtc(
8508         struct dm_crtc_state *new_crtc_state)
8509 {
8510         new_crtc_state->vrr_supported = false;
8511
8512         memset(&new_crtc_state->vrr_infopacket, 0,
8513                sizeof(new_crtc_state->vrr_infopacket));
8514 }
8515
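/*
 * dm_update_crtc_state - add or remove the DC stream backing a CRTC.
 *
 * Called from atomic check, first with enable == false to remove the streams
 * of disabled or modeset CRTCs from the DC context, then with enable == true
 * to create and add the new streams. Stream updates that do not require a
 * full modeset (scaling, ABM, color management, FreeSync) are applied at the
 * end, past the skip_modeset label.
 */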
8516 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8517                                 struct drm_atomic_state *state,
8518                                 struct drm_crtc *crtc,
8519                                 struct drm_crtc_state *old_crtc_state,
8520                                 struct drm_crtc_state *new_crtc_state,
8521                                 bool enable,
8522                                 bool *lock_and_validation_needed)
8523 {
8524         struct dm_atomic_state *dm_state = NULL;
8525         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8526         struct dc_stream_state *new_stream;
8527         int ret = 0;
8528
8529         /*
8530          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8531          * update changed items
8532          */
8533         struct amdgpu_crtc *acrtc = NULL;
8534         struct amdgpu_dm_connector *aconnector = NULL;
8535         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8536         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8537
8538         new_stream = NULL;
8539
8540         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8541         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8542         acrtc = to_amdgpu_crtc(crtc);
8543         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8544
8545         /* TODO This hack should go away */
8546         if (aconnector && enable) {
8547                 /* Make sure fake sink is created in plug-in scenario */
8548                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8549                                                             &aconnector->base);
8550                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8551                                                             &aconnector->base);
8552
8553                 if (IS_ERR(drm_new_conn_state)) {
8554                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8555                         goto fail;
8556                 }
8557
8558                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8559                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8560
8561                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8562                         goto skip_modeset;
8563
8564                 new_stream = create_validate_stream_for_sink(aconnector,
8565                                                              &new_crtc_state->mode,
8566                                                              dm_new_conn_state,
8567                                                              dm_old_crtc_state->stream);
8568
8569                 /*
8570                  * We can have no stream on ACTION_SET if a display
8571                  * was disconnected during S3; in this case it is not an
8572                  * error: the OS will be updated after detection and
8573                  * will do the right thing on the next atomic commit.
8574                  */
8575
8576                 if (!new_stream) {
8577                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8578                                         __func__, acrtc->base.base.id);
8579                         ret = -ENOMEM;
8580                         goto fail;
8581                 }
8582
8583                 /*
8584                  * TODO: Check VSDB bits to decide whether this should
8585                  * be enabled or not.
8586                  */
8587                 new_stream->triggered_crtc_reset.enabled =
8588                         dm->force_timing_sync;
8589
8590                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8591
8592                 ret = fill_hdr_info_packet(drm_new_conn_state,
8593                                            &new_stream->hdr_static_metadata);
8594                 if (ret)
8595                         goto fail;
8596
8597                 /*
8598                  * If we already removed the old stream from the context
8599                  * (and set the new stream to NULL) then we can't reuse
8600                  * the old stream even if the stream and scaling are unchanged.
8601                  * We'll hit the BUG_ON and black screen.
8602                  *
8603                  * TODO: Refactor this function to allow this check to work
8604                  * in all conditions.
8605                  */
8606                 if (dm_new_crtc_state->stream &&
8607                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8608                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8609                         new_crtc_state->mode_changed = false;
8610                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
8611                                          new_crtc_state->mode_changed);
8612                 }
8613         }
8614
8615         /* mode_changed flag may get updated above, need to check again */
8616         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8617                 goto skip_modeset;
8618
8619         DRM_DEBUG_DRIVER(
8620                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8621                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8622                 "connectors_changed:%d\n",
8623                 acrtc->crtc_id,
8624                 new_crtc_state->enable,
8625                 new_crtc_state->active,
8626                 new_crtc_state->planes_changed,
8627                 new_crtc_state->mode_changed,
8628                 new_crtc_state->active_changed,
8629                 new_crtc_state->connectors_changed);
8630
8631         /* Remove stream for any changed/disabled CRTC */
8632         if (!enable) {
8633
8634                 if (!dm_old_crtc_state->stream)
8635                         goto skip_modeset;
8636
8637                 ret = dm_atomic_get_state(state, &dm_state);
8638                 if (ret)
8639                         goto fail;
8640
8641                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8642                                 crtc->base.id);
8643
8644                 /* i.e. reset mode */
8645                 if (dc_remove_stream_from_ctx(
8646                                 dm->dc,
8647                                 dm_state->context,
8648                                 dm_old_crtc_state->stream) != DC_OK) {
8649                         ret = -EINVAL;
8650                         goto fail;
8651                 }
8652
8653                 dc_stream_release(dm_old_crtc_state->stream);
8654                 dm_new_crtc_state->stream = NULL;
8655
8656                 reset_freesync_config_for_crtc(dm_new_crtc_state);
8657
8658                 *lock_and_validation_needed = true;
8659
8660         } else { /* Add stream for any updated/enabled CRTC */
8661                 /*
8662                  * Quick fix to prevent a NULL pointer on new_stream when added
8663                  * MST connectors are not found in the existing crtc_state in
8664                  * daisy-chained (MST) mode. TODO: dig out the root cause of this.
8665                  */
8666                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8667                         goto skip_modeset;
8668
8669                 if (modereset_required(new_crtc_state))
8670                         goto skip_modeset;
8671
8672                 if (modeset_required(new_crtc_state, new_stream,
8673                                      dm_old_crtc_state->stream)) {
8674
8675                         WARN_ON(dm_new_crtc_state->stream);
8676
8677                         ret = dm_atomic_get_state(state, &dm_state);
8678                         if (ret)
8679                                 goto fail;
8680
8681                         dm_new_crtc_state->stream = new_stream;
8682
8683                         dc_stream_retain(new_stream);
8684
8685                         DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8686                                                 crtc->base.id);
8687
8688                         if (dc_add_stream_to_ctx(
8689                                         dm->dc,
8690                                         dm_state->context,
8691                                         dm_new_crtc_state->stream) != DC_OK) {
8692                                 ret = -EINVAL;
8693                                 goto fail;
8694                         }
8695
8696                         *lock_and_validation_needed = true;
8697                 }
8698         }
8699
8700 skip_modeset:
8701         /* Release extra reference */
8702         if (new_stream)
8703                 dc_stream_release(new_stream);
8704
8705         /*
8706          * We want to do dc stream updates that do not require a
8707          * full modeset below.
8708          */
8709         if (!(enable && aconnector && new_crtc_state->active))
8710                 return 0;
8711         /*
8712          * Given above conditions, the dc state cannot be NULL because:
8713          * 1. We're in the process of enabling CRTCs (just been added
8714          *    to the dc context, or already is on the context)
8715          * 2. Has a valid connector attached, and
8716          * 3. Is currently active and enabled.
8717          * => The dc stream state currently exists.
8718          */
8719         BUG_ON(dm_new_crtc_state->stream == NULL);
8720
8721         /* Scaling or underscan settings */
8722         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8723                 update_stream_scaling_settings(
8724                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8725
8726         /* ABM settings */
8727         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8728
8729         /*
8730          * Color management settings. We also update color properties
8731          * when a modeset is needed, to ensure it gets reprogrammed.
8732          */
8733         if (dm_new_crtc_state->base.color_mgmt_changed ||
8734             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8735                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8736                 if (ret)
8737                         goto fail;
8738         }
8739
8740         /* Update Freesync settings. */
8741         get_freesync_config_for_crtc(dm_new_crtc_state,
8742                                      dm_new_conn_state);
8743
8744         return ret;
8745
8746 fail:
8747         if (new_stream)
8748                 dc_stream_release(new_stream);
8749         return ret;
8750 }
8751
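/*
 * Decide whether a plane update requires removing and re-adding every plane
 * on the stream (a full reset) instead of a fast in-place update. Any change
 * that can affect pipe acquisition, z-order or bandwidth forces a reset.
 */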
8752 static bool should_reset_plane(struct drm_atomic_state *state,
8753                                struct drm_plane *plane,
8754                                struct drm_plane_state *old_plane_state,
8755                                struct drm_plane_state *new_plane_state)
8756 {
8757         struct drm_plane *other;
8758         struct drm_plane_state *old_other_state, *new_other_state;
8759         struct drm_crtc_state *new_crtc_state;
8760         int i;
8761
8762         /*
8763          * TODO: Remove this hack once the checks below are sufficient
8764          * to determine when we need to reset all the planes on
8765          * the stream.
8766          */
8767         if (state->allow_modeset)
8768                 return true;
8769
8770         /* Exit early if we know that we're adding or removing the plane. */
8771         if (old_plane_state->crtc != new_plane_state->crtc)
8772                 return true;
8773
8774         /* old crtc == new_crtc == NULL, plane not in context. */
8775         if (!new_plane_state->crtc)
8776                 return false;
8777
8778         new_crtc_state =
8779                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8780
8781         if (!new_crtc_state)
8782                 return true;
8783
8784         /* CRTC Degamma changes currently require us to recreate planes. */
8785         if (new_crtc_state->color_mgmt_changed)
8786                 return true;
8787
8788         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8789                 return true;
8790
8791         /*
8792          * If there are any new primary or overlay planes being added or
8793          * removed then the z-order can potentially change. To ensure
8794          * correct z-order and pipe acquisition the current DC architecture
8795          * requires us to remove and recreate all existing planes.
8796          *
8797          * TODO: Come up with a more elegant solution for this.
8798          */
8799         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8800                 struct amdgpu_framebuffer *old_afb, *new_afb;
8801                 if (other->type == DRM_PLANE_TYPE_CURSOR)
8802                         continue;
8803
8804                 if (old_other_state->crtc != new_plane_state->crtc &&
8805                     new_other_state->crtc != new_plane_state->crtc)
8806                         continue;
8807
8808                 if (old_other_state->crtc != new_other_state->crtc)
8809                         return true;
8810
8811                 /* Src/dst size and scaling updates. */
8812                 if (old_other_state->src_w != new_other_state->src_w ||
8813                     old_other_state->src_h != new_other_state->src_h ||
8814                     old_other_state->crtc_w != new_other_state->crtc_w ||
8815                     old_other_state->crtc_h != new_other_state->crtc_h)
8816                         return true;
8817
8818                 /* Rotation / mirroring updates. */
8819                 if (old_other_state->rotation != new_other_state->rotation)
8820                         return true;
8821
8822                 /* Blending updates. */
8823                 if (old_other_state->pixel_blend_mode !=
8824                     new_other_state->pixel_blend_mode)
8825                         return true;
8826
8827                 /* Alpha updates. */
8828                 if (old_other_state->alpha != new_other_state->alpha)
8829                         return true;
8830
8831                 /* Colorspace changes. */
8832                 if (old_other_state->color_range != new_other_state->color_range ||
8833                     old_other_state->color_encoding != new_other_state->color_encoding)
8834                         return true;
8835
8836                 /* Framebuffer checks fall at the end. */
8837                 if (!old_other_state->fb || !new_other_state->fb)
8838                         continue;
8839
8840                 /* Pixel format changes can require bandwidth updates. */
8841                 if (old_other_state->fb->format != new_other_state->fb->format)
8842                         return true;
8843
8844                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8845                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8846
8847                 /* Tiling and DCC changes also require bandwidth updates. */
8848                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
8849                     old_afb->base.modifier != new_afb->base.modifier)
8850                         return true;
8851         }
8852
8853         return false;
8854 }
8855
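/*
 * Validate a framebuffer bound to the cursor plane: size within the hardware
 * cursor limits, no cropping or scaling, a pitch the cursor hardware supports
 * and, in the absence of format modifiers, linear tiling.
 */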
8856 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
8857                               struct drm_plane_state *new_plane_state,
8858                               struct drm_framebuffer *fb)
8859 {
8860         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
8861         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
8862         unsigned int pitch;
8863         bool linear;
8864
8865         if (fb->width > new_acrtc->max_cursor_width ||
8866             fb->height > new_acrtc->max_cursor_height) {
8867                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
8868                                  fb->width, fb->height);
8870                 return -EINVAL;
8871         }
8872         if (new_plane_state->src_w != fb->width << 16 ||
8873             new_plane_state->src_h != fb->height << 16) {
8874                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
8875                 return -EINVAL;
8876         }
8877
8878         /* Pitch in pixels */
8879         pitch = fb->pitches[0] / fb->format->cpp[0];
8880
8881         if (fb->width != pitch) {
8882                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
8883                                  fb->width, pitch);
8884                 return -EINVAL;
8885         }
8886
8887         switch (pitch) {
8888         case 64:
8889         case 128:
8890         case 256:
8891                 /* FB pitch is supported by cursor plane */
8892                 break;
8893         default:
8894                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
8895                 return -EINVAL;
8896         }
8897
8898         /* Core DRM takes care of checking FB modifiers, so we only need to
8899          * check tiling flags when the FB doesn't have a modifier. */
8900         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
8901                 if (adev->family < AMDGPU_FAMILY_AI) {
8902                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
8903                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
8904                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
8905                 } else {
8906                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
8907                 }
8908                 if (!linear) {
8909                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
8910                         return -EINVAL;
8911                 }
8912         }
8913
8914         return 0;
8915 }
8916
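/*
 * dm_update_plane_state - add or remove the DC plane state backing a DRM
 * plane. Mirrors dm_update_crtc_state: atomic check calls it with
 * enable == false to remove planes from the DC context and with
 * enable == true to create, fill and add them.
 */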
8917 static int dm_update_plane_state(struct dc *dc,
8918                                  struct drm_atomic_state *state,
8919                                  struct drm_plane *plane,
8920                                  struct drm_plane_state *old_plane_state,
8921                                  struct drm_plane_state *new_plane_state,
8922                                  bool enable,
8923                                  bool *lock_and_validation_needed)
8924 {
8926         struct dm_atomic_state *dm_state = NULL;
8927         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8928         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8929         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8930         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8931         struct amdgpu_crtc *new_acrtc;
8932         bool needs_reset;
8933         int ret = 0;
8934
8936         new_plane_crtc = new_plane_state->crtc;
8937         old_plane_crtc = old_plane_state->crtc;
8938         dm_new_plane_state = to_dm_plane_state(new_plane_state);
8939         dm_old_plane_state = to_dm_plane_state(old_plane_state);
8940
8941         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8942                 if (!enable || !new_plane_crtc ||
8943                         drm_atomic_plane_disabling(plane->state, new_plane_state))
8944                         return 0;
8945
8946                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8947
8948                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
8949                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
8950                         return -EINVAL;
8951                 }
8952
8953                 if (new_plane_state->fb) {
8954                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
8955                                                  new_plane_state->fb);
8956                         if (ret)
8957                                 return ret;
8958                 }
8959
8960                 return 0;
8961         }
8962
8963         needs_reset = should_reset_plane(state, plane, old_plane_state,
8964                                          new_plane_state);
8965
8966         /* Remove any changed/removed planes */
8967         if (!enable) {
8968                 if (!needs_reset)
8969                         return 0;
8970
8971                 if (!old_plane_crtc)
8972                         return 0;
8973
8974                 old_crtc_state = drm_atomic_get_old_crtc_state(
8975                                 state, old_plane_crtc);
8976                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8977
8978                 if (!dm_old_crtc_state->stream)
8979                         return 0;
8980
8981                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8982                                 plane->base.id, old_plane_crtc->base.id);
8983
8984                 ret = dm_atomic_get_state(state, &dm_state);
8985                 if (ret)
8986                         return ret;
8987
8988                 if (!dc_remove_plane_from_context(
8989                                 dc,
8990                                 dm_old_crtc_state->stream,
8991                                 dm_old_plane_state->dc_state,
8992                                 dm_state->context)) {
8994                         return -EINVAL;
8995                 }
8996
8998                 dc_plane_state_release(dm_old_plane_state->dc_state);
8999                 dm_new_plane_state->dc_state = NULL;
9000
9001                 *lock_and_validation_needed = true;
9002
9003         } else { /* Add new planes */
9004                 struct dc_plane_state *dc_new_plane_state;
9005
9006                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9007                         return 0;
9008
9009                 if (!new_plane_crtc)
9010                         return 0;
9011
9012                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9013                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9014
9015                 if (!dm_new_crtc_state->stream)
9016                         return 0;
9017
9018                 if (!needs_reset)
9019                         return 0;
9020
9021                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9022                 if (ret)
9023                         return ret;
9024
9025                 WARN_ON(dm_new_plane_state->dc_state);
9026
9027                 dc_new_plane_state = dc_create_plane_state(dc);
9028                 if (!dc_new_plane_state)
9029                         return -ENOMEM;
9030
9031                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9032                                 plane->base.id, new_plane_crtc->base.id);
9033
9034                 ret = fill_dc_plane_attributes(
9035                         drm_to_adev(new_plane_crtc->dev),
9036                         dc_new_plane_state,
9037                         new_plane_state,
9038                         new_crtc_state);
9039                 if (ret) {
9040                         dc_plane_state_release(dc_new_plane_state);
9041                         return ret;
9042                 }
9043
9044                 ret = dm_atomic_get_state(state, &dm_state);
9045                 if (ret) {
9046                         dc_plane_state_release(dc_new_plane_state);
9047                         return ret;
9048                 }
9049
9050                 /*
9051                  * Any atomic check errors that occur after this will
9052                  * not need a release. The plane state will be attached
9053                  * to the stream, and therefore part of the atomic
9054                  * state. It'll be released when the atomic state is
9055                  * cleaned.
9056                  */
9057                 if (!dc_add_plane_to_context(
9058                                 dc,
9059                                 dm_new_crtc_state->stream,
9060                                 dc_new_plane_state,
9061                                 dm_state->context)) {
9062
9063                         dc_plane_state_release(dc_new_plane_state);
9064                         return -EINVAL;
9065                 }
9066
9067                 dm_new_plane_state->dc_state = dc_new_plane_state;
9068
9069                 /* Tell DC to do a full surface update every time there
9070                  * is a plane change. Inefficient, but works for now.
9071                  */
9072                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9073
9074                 *lock_and_validation_needed = true;
9075         }
9076
9078         return ret;
9079 }
9080
9081 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9082                                 struct drm_crtc *crtc,
9083                                 struct drm_crtc_state *new_crtc_state)
9084 {
9085         struct drm_plane_state *new_cursor_state, *new_primary_state;
9086         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9087
9088         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9089          * cursor per pipe, but it inherits the scaling and positioning from the
9090          * underlying pipe. Check that the cursor plane's scaling matches the
9091          * primary plane's. */
9092
9093         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9094         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9095         if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb)
9096                 return 0;
9098
9099         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9100                          (new_cursor_state->src_w >> 16);
9101         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9102                          (new_cursor_state->src_h >> 16);
9103
9104         primary_scale_w = new_primary_state->crtc_w * 1000 /
9105                          (new_primary_state->src_w >> 16);
9106         primary_scale_h = new_primary_state->crtc_h * 1000 /
9107                          (new_primary_state->src_h >> 16);
9108
9109         if (cursor_scale_w != primary_scale_w ||
9110             cursor_scale_h != primary_scale_h) {
9111                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9112                 return -EINVAL;
9113         }
9114
9115         return 0;
9116 }
9117
9118 #if defined(CONFIG_DRM_AMD_DC_DCN)
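/*
 * If the given CRTC is driven by an MST connector, add every CRTC that
 * shares the same MST topology to the atomic state, since enabling DSC on
 * one stream can require recomputing the configuration of all streams in
 * that topology.
 */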
9119 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9120 {
9121         struct drm_connector *connector;
9122         struct drm_connector_state *conn_state;
9123         struct amdgpu_dm_connector *aconnector = NULL;
9124         int i;

9125         for_each_new_connector_in_state(state, connector, conn_state, i) {
9126                 if (conn_state->crtc != crtc)
9127                         continue;
9128
9129                 aconnector = to_amdgpu_dm_connector(connector);
9130                 if (!aconnector->port || !aconnector->mst_port)
9131                         aconnector = NULL;
9132                 else
9133                         break;
9134         }
9135
9136         if (!aconnector)
9137                 return 0;
9138
9139         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9140 }
9141 #endif
9142
9143 /**
9144  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9145  * @dev: The DRM device
9146  * @state: The atomic state to commit
9147  *
9148  * Validate that the given atomic state is programmable by DC into hardware.
9149  * This involves constructing a &struct dc_state reflecting the new hardware
9150  * state we wish to commit, then querying DC to see if it is programmable. It's
9151  * important not to modify the existing DC state. Otherwise, atomic_check
9152  * may unexpectedly commit hardware changes.
9153  *
9154  * When validating the DC state, it's important that the right locks are
9155  * acquired. For full updates case which removes/adds/updates streams on one
9156  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9157  * that any such full update commit will wait for completion of any outstanding
9158  * flip using DRMs synchronization events.
9159  *
9160  * Note that DM adds the affected connectors for all CRTCs in state, when that
9161  * might not seem necessary. This is because DC stream creation requires the
9162  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9163  * be possible but non-trivial - a possible TODO item.
9164  *
9165  * Return: 0 on success, negative error code otherwise.
9166  */
9167 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9168                                   struct drm_atomic_state *state)
9169 {
9170         struct amdgpu_device *adev = drm_to_adev(dev);
9171         struct dm_atomic_state *dm_state = NULL;
9172         struct dc *dc = adev->dm.dc;
9173         struct drm_connector *connector;
9174         struct drm_connector_state *old_con_state, *new_con_state;
9175         struct drm_crtc *crtc;
9176         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9177         struct drm_plane *plane;
9178         struct drm_plane_state *old_plane_state, *new_plane_state;
9179         enum dc_status status;
9180         int ret, i;
9181         bool lock_and_validation_needed = false;
9182         struct dm_crtc_state *dm_old_crtc_state;
9183
9184         trace_amdgpu_dm_atomic_check_begin(state);
9185
9186         ret = drm_atomic_helper_check_modeset(dev, state);
9187         if (ret)
9188                 goto fail;
9189
9190         /* Check connector changes */
9191         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9192                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9193                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9194
9195                 /* Skip connectors that are disabled or already part of a modeset. */
9196                 if (!old_con_state->crtc && !new_con_state->crtc)
9197                         continue;
9198
9199                 if (!new_con_state->crtc)
9200                         continue;
9201
9202                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9203                 if (IS_ERR(new_crtc_state)) {
9204                         ret = PTR_ERR(new_crtc_state);
9205                         goto fail;
9206                 }
9207
9208                 if (dm_old_con_state->abm_level !=
9209                     dm_new_con_state->abm_level)
9210                         new_crtc_state->connectors_changed = true;
9211         }
9212
9213 #if defined(CONFIG_DRM_AMD_DC_DCN)
9214         if (adev->asic_type >= CHIP_NAVI10) {
9215                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9216                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9217                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
9218                                 if (ret)
9219                                         goto fail;
9220                         }
9221                 }
9222         }
9223 #endif
9224         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9225                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9226
9227                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9228                     !new_crtc_state->color_mgmt_changed &&
9229                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9229                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9230                     !dm_old_crtc_state->dsc_force_changed)
9232
9233                 if (!new_crtc_state->enable)
9234                         continue;
9235
9236                 ret = drm_atomic_add_affected_connectors(state, crtc);
9237                 if (ret)
9238                         goto fail;
9239
9240                 ret = drm_atomic_add_affected_planes(state, crtc);
9241                 if (ret)
9242                         goto fail;
9243
9244                 if (dm_old_crtc_state->dsc_force_changed)
9245                         new_crtc_state->mode_changed = true;
9246         }
9247
9248         /*
9249          * Add all primary and overlay planes on the CRTC to the state
9250          * whenever a plane is enabled to maintain correct z-ordering
9251          * and to enable fast surface updates.
9252          */
9253         drm_for_each_crtc(crtc, dev) {
9254                 bool modified = false;
9255
9256                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9257                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9258                                 continue;
9259
9260                         if (new_plane_state->crtc == crtc ||
9261                             old_plane_state->crtc == crtc) {
9262                                 modified = true;
9263                                 break;
9264                         }
9265                 }
9266
9267                 if (!modified)
9268                         continue;
9269
9270                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9271                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9272                                 continue;
9273
9274                         new_plane_state =
9275                                 drm_atomic_get_plane_state(state, plane);
9276
9277                         if (IS_ERR(new_plane_state)) {
9278                                 ret = PTR_ERR(new_plane_state);
9279                                 goto fail;
9280                         }
9281                 }
9282         }
9283
        /* Remove existing planes if they are modified */
9285         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9286                 ret = dm_update_plane_state(dc, state, plane,
9287                                             old_plane_state,
9288                                             new_plane_state,
9289                                             false,
9290                                             &lock_and_validation_needed);
9291                 if (ret)
9292                         goto fail;
9293         }
9294
        /* Disable all CRTCs that require it */
9296         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9297                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9298                                            old_crtc_state,
9299                                            new_crtc_state,
9300                                            false,
9301                                            &lock_and_validation_needed);
9302                 if (ret)
9303                         goto fail;
9304         }
9305
        /* Enable all CRTCs that require it */
9307         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9308                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9309                                            old_crtc_state,
9310                                            new_crtc_state,
9311                                            true,
9312                                            &lock_and_validation_needed);
9313                 if (ret)
9314                         goto fail;
9315         }
9316
9317         /* Add new/modified planes */
9318         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9319                 ret = dm_update_plane_state(dc, state, plane,
9320                                             old_plane_state,
9321                                             new_plane_state,
9322                                             true,
9323                                             &lock_and_validation_needed);
9324                 if (ret)
9325                         goto fail;
9326         }
9327
9328         /* Run this here since we want to validate the streams we created */
9329         ret = drm_atomic_helper_check_planes(dev, state);
9330         if (ret)
9331                 goto fail;
9332
        /* Check the scaling of cursor planes */
9334         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9335                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9336                 if (ret)
9337                         goto fail;
9338         }
9339
9340         if (state->legacy_cursor_update) {
                /*
                 * This is a fast cursor update coming from the plane update
                 * helper; check whether it can be done asynchronously for
                 * better performance.
                 */
9346                 state->async_update =
9347                         !drm_atomic_helper_async_check(dev, state);
9348
9349                 /*
9350                  * Skip the remaining global validation if this is an async
9351                  * update. Cursor updates can be done without affecting
9352                  * state or bandwidth calcs and this avoids the performance
9353                  * penalty of locking the private state object and
9354                  * allocating a new dc_state.
9355                  */
9356                 if (state->async_update)
9357                         return 0;
9358         }
9359
        /* Check scaling and underscan changes */
        /* TODO: Scaling-change validation was removed because a new stream
         * cannot currently be committed into the context without causing a
         * full reset. Decide how to handle this.
         */
9365         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9366                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9367                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9368                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9369
9370                 /* Skip any modesets/resets */
9371                 if (!acrtc || drm_atomic_crtc_needs_modeset(
9372                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9373                         continue;
9374
                /* Skip anything that is not a scaling or underscan change */
9376                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9377                         continue;
9378
9379                 lock_and_validation_needed = true;
9380         }
9381
        /*
         * Streams and planes are reset when there are changes that affect
         * bandwidth. Anything that affects bandwidth needs to go through
         * DC global validation to ensure that the configuration can be applied
         * to hardware.
         *
         * We currently have to stall out here in atomic_check for outstanding
         * commits to finish in this case because our IRQ handlers reference
         * DRM state directly - we can end up disabling interrupts too early
         * if we don't.
         *
         * TODO: Remove this stall and drop DM state private objects.
         */
9395         if (lock_and_validation_needed) {
9396                 ret = dm_atomic_get_state(state, &dm_state);
9397                 if (ret)
9398                         goto fail;
9399
9400                 ret = do_aquire_global_lock(dev, state);
9401                 if (ret)
9402                         goto fail;
9403
9404 #if defined(CONFIG_DRM_AMD_DC_DCN)
9405                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9406                         goto fail;
9407
9408                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9409                 if (ret)
9410                         goto fail;
9411 #endif
9412
                /*
                 * Perform validation of the MST topology in the state:
                 * we need to run the MST atomic check before calling
                 * dc_validate_global_state(), or we may get stuck in an
                 * infinite loop and eventually hang.
                 */
9419                 ret = drm_dp_mst_atomic_check(state);
9420                 if (ret)
9421                         goto fail;
9422                 status = dc_validate_global_state(dc, dm_state->context, false);
9423                 if (status != DC_OK) {
9424                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
9425                                        dc_status_to_str(status), status);
9426                         ret = -EINVAL;
9427                         goto fail;
9428                 }
9429         } else {
                /*
                 * The commit is a fast update. Fast updates shouldn't change
                 * the DC context or affect global validation, and their
                 * commit work can proceed in parallel with other commits not
                 * touching the same resource. If we have a new DC context as
                 * part of the DM atomic state from validation, we need to
                 * free it and retain the existing one instead.
                 *
                 * Furthermore, since the DM atomic state only contains the DC
                 * context and can safely be annulled, we can free the state
                 * and clear the associated private object now to free some
                 * memory and avoid a possible use-after-free later.
                 */
9443
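                /*
                 * Illustration (array contents are hypothetical): with
                 * private_objs = [A, DM, B] and the DM object found at
                 * i == 1, B is moved into slot 1 and num_private_objs
                 * drops from 3 to 2.
                 */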
9444                 for (i = 0; i < state->num_private_objs; i++) {
9445                         struct drm_private_obj *obj = state->private_objs[i].ptr;
9446
9447                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
9448                                 int j = state->num_private_objs-1;
9449
9450                                 dm_atomic_destroy_state(obj,
9451                                                 state->private_objs[i].state);
9452
9453                                 /* If i is not at the end of the array then the
9454                                  * last element needs to be moved to where i was
9455                                  * before the array can safely be truncated.
9456                                  */
9457                                 if (i != j)
9458                                         state->private_objs[i] =
9459                                                 state->private_objs[j];
9460
9461                                 state->private_objs[j].ptr = NULL;
9462                                 state->private_objs[j].state = NULL;
9463                                 state->private_objs[j].old_state = NULL;
9464                                 state->private_objs[j].new_state = NULL;
9465
9466                                 state->num_private_objs = j;
9467                                 break;
9468                         }
9469                 }
9470         }
9471
9472         /* Store the overall update type for use later in atomic check. */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9474                 struct dm_crtc_state *dm_new_crtc_state =
9475                         to_dm_crtc_state(new_crtc_state);
9476
9477                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9478                                                          UPDATE_TYPE_FULL :
9479                                                          UPDATE_TYPE_FAST;
9480         }
9481
        /* ret must be 0 (success) at this point */
9483         WARN_ON(ret);
9484
9485         trace_amdgpu_dm_atomic_check_finish(state, ret);
9486
9487         return ret;
9488
9489 fail:
9490         if (ret == -EDEADLK)
9491                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9492         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9493                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9494         else
9495                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9496
9497         trace_amdgpu_dm_atomic_check_finish(state, ret);
9498
9499         return ret;
9500 }
9501
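/*
 * Check whether the sink can ignore the MSA timing parameters, i.e. whether
 * the DP_MSA_TIMING_PAR_IGNORED bit is set in the DP_DOWN_STREAM_PORT_COUNT
 * DPCD register; this is a prerequisite for variable-refresh operation.
 */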
9502 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9503                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
9504 {
9505         uint8_t dpcd_data;
9506         bool capable = false;
9507
9508         if (amdgpu_dm_connector->dc_link &&
9509                 dm_helpers_dp_read_dpcd(
9510                                 NULL,
9511                                 amdgpu_dm_connector->dc_link,
9512                                 DP_DOWN_STREAM_PORT_COUNT,
9513                                 &dpcd_data,
9514                                 sizeof(dpcd_data))) {
                capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) != 0;
9516         }
9517
9518         return capable;
9519 }
9520 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9521                                         struct edid *edid)
9522 {
9523         int i;
9524         bool edid_check_required;
9525         struct detailed_timing *timing;
9526         struct detailed_non_pixel *data;
9527         struct detailed_data_monitor_range *range;
9528         struct amdgpu_dm_connector *amdgpu_dm_connector =
9529                         to_amdgpu_dm_connector(connector);
9530         struct dm_connector_state *dm_con_state = NULL;
9531
9532         struct drm_device *dev = connector->dev;
9533         struct amdgpu_device *adev = drm_to_adev(dev);
9534         bool freesync_capable = false;
9535
9536         if (!connector->state) {
9537                 DRM_ERROR("%s - Connector has no state", __func__);
9538                 goto update;
9539         }
9540
9541         if (!edid) {
9542                 dm_con_state = to_dm_connector_state(connector->state);
9543
9544                 amdgpu_dm_connector->min_vfreq = 0;
9545                 amdgpu_dm_connector->max_vfreq = 0;
9546                 amdgpu_dm_connector->pixel_clock_mhz = 0;
9547
9548                 goto update;
9549         }
9550
9551         dm_con_state = to_dm_connector_state(connector->state);
9552
9553         edid_check_required = false;
9554         if (!amdgpu_dm_connector->dc_sink) {
9555                 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9556                 goto update;
9557         }
9558         if (!adev->dm.freesync_module)
9559                 goto update;
        /*
         * If the EDID is non-NULL, restrict FreeSync support to DP and eDP.
         */
9563         if (edid) {
9564                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9565                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9566                         edid_check_required = is_dp_capable_without_timing_msa(
9567                                                 adev->dm.dc,
9568                                                 amdgpu_dm_connector);
9569                 }
9570         }
        if (edid_check_required && (edid->version > 1 ||
            (edid->version == 1 && edid->revision > 1))) {
9573                 for (i = 0; i < 4; i++) {
9574
9575                         timing  = &edid->detailed_timings[i];
9576                         data    = &timing->data.other_data;
9577                         range   = &data->data.range;
9578                         /*
9579                          * Check if monitor has continuous frequency mode
9580                          */
9581                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
9582                                 continue;
                        /*
                         * Check for the range-limits-only flag. If flags == 1,
                         * no additional timing information is provided.
                         * Default GTF, GTF secondary curve and CVT are not
                         * supported.
                         */
9589                         if (range->flags != 1)
9590                                 continue;
9591
9592                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9593                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9594                         amdgpu_dm_connector->pixel_clock_mhz =
9595                                 range->pixel_clock_mhz * 10;
9596                         break;
9597                 }
9598
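                /*
                 * Only advertise FreeSync when the reported refresh range is
                 * more than 10 Hz wide; a narrower range leaves little room
                 * for variable refresh to be useful.
                 */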
                if (amdgpu_dm_connector->max_vfreq -
                    amdgpu_dm_connector->min_vfreq > 10)
                        freesync_capable = true;
9604         }
9605
9606 update:
9607         if (dm_con_state)
9608                 dm_con_state->freesync_capable = freesync_capable;
9609
9610         if (connector->vrr_capable_property)
9611                 drm_connector_set_vrr_capable_property(connector,
9612                                                        freesync_capable);
9613 }
9614
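/*
 * Read the sink's PSR capability from the DP_PSR_SUPPORT DPCD register and
 * cache the supported PSR version on the link. Applies to eDP links only.
 */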
9615 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9616 {
9617         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9618
9619         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9620                 return;
9621         if (link->type == dc_connection_none)
9622                 return;
9623         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9624                                         dpcd_data, sizeof(dpcd_data))) {
9625                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9626
9627                 if (dpcd_data[0] == 0) {
9628                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9629                         link->psr_settings.psr_feature_enabled = false;
9630                 } else {
9631                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
9632                         link->psr_settings.psr_feature_enabled = true;
9633                 }
9634
9635                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9636         }
9637 }
9638
/*
 * amdgpu_dm_link_setup_psr() - configure the PSR link
 * @stream: stream state
 *
 * Return: true on success
 */
9645 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9646 {
9647         struct dc_link *link = NULL;
9648         struct psr_config psr_config = {0};
9649         struct psr_context psr_context = {0};
9650         bool ret = false;
9651
9652         if (stream == NULL)
9653                 return false;
9654
9655         link = stream->link;
9656
9657         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9658
9659         if (psr_config.psr_version > 0) {
9660                 psr_config.psr_exit_link_training_required = 0x1;
9661                 psr_config.psr_frame_capture_indication_req = 0;
9662                 psr_config.psr_rfb_setup_time = 0x37;
9663                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9664                 psr_config.allow_smu_optimizations = 0x0;
9665
9666                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9667
9668         }
9669         DRM_DEBUG_DRIVER("PSR link: %d\n",      link->psr_settings.psr_feature_enabled);
9670
9671         return ret;
9672 }
9673
/*
 * amdgpu_dm_psr_enable() - enable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
9680 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9681 {
9682         struct dc_link *link = stream->link;
9683         unsigned int vsync_rate_hz = 0;
9684         struct dc_static_screen_params params = {0};
        /*
         * Number of static frames before generating the interrupt to enter
         * PSR; initialize to a fail-safe of 2 static frames.
         */
        unsigned int num_frames_static = 2;
9690
9691         DRM_DEBUG_DRIVER("Enabling psr...\n");
9692
9693         vsync_rate_hz = div64_u64(div64_u64((
9694                         stream->timing.pix_clk_100hz * 100),
9695                         stream->timing.v_total),
9696                         stream->timing.h_total);
9697
        /*
         * Round up: choose the number of frames such that at least 30 ms
         * of static time has passed.
         */
9702         if (vsync_rate_hz != 0) {
9703                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9704                 num_frames_static = (30000 / frame_time_microsec) + 1;
9705         }
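        /*
         * For example, a 60 Hz stream gives frame_time_microsec = 16666, so
         * num_frames_static = 30000 / 16666 + 1 = 2 frames of static screen
         * before the PSR-entry interrupt fires.
         */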
9706
9707         params.triggers.cursor_update = true;
9708         params.triggers.overlay_update = true;
9709         params.triggers.surface_update = true;
9710         params.num_frames = num_frames_static;
9711
9712         dc_stream_set_static_screen_params(link->ctx->dc,
9713                                            &stream, 1,
9714                                            &params);
9715
9716         return dc_link_set_psr_allow_active(link, true, false, false);
9717 }
9718
/*
 * amdgpu_dm_psr_disable() - disable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
9725 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
        DRM_DEBUG_DRIVER("Disabling psr...\n");
9729
9730         return dc_link_set_psr_allow_active(stream->link, false, true, false);
9731 }
9732
/*
 * amdgpu_dm_psr_disable_all() - disable the PSR firmware on all streams
 * @dm: display manager
 *
 * Return: true on success
 */
9739 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9740 {
9741         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9742         return dc_set_psr_allow_active(dm->dc, false);
9743 }
9744
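/*
 * Propagate the current force_timing_sync setting to every active stream and
 * retrigger CRTC synchronization, all under the DC lock.
 */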
9745 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9746 {
9747         struct amdgpu_device *adev = drm_to_adev(dev);
9748         struct dc *dc = adev->dm.dc;
9749         int i;
9750
9751         mutex_lock(&adev->dm.dc_lock);
9752         if (dc->current_state) {
9753                 for (i = 0; i < dc->current_state->stream_count; ++i)
9754                         dc->current_state->streams[i]
9755                                 ->triggered_crtc_reset.enabled =
9756                                 adev->dm.force_timing_sync;
9757
9758                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9759                 dc_trigger_sync(dc, dc->current_state);
9760         }
9761         mutex_unlock(&adev->dm.dc_lock);
9762 }
9763
9764 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9765                        uint32_t value, const char *func_name)
9766 {
9767 #ifdef DM_CHECK_ADDR_0
9768         if (address == 0) {
9769                 DC_ERR("invalid register write. address = 0");
9770                 return;
9771         }
9772 #endif
9773         cgs_write_register(ctx->cgs_device, address, value);
9774         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9775 }
9776
9777 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9778                           const char *func_name)
9779 {
9780         uint32_t value;
9781 #ifdef DM_CHECK_ADDR_0
9782         if (address == 0) {
9783                 DC_ERR("invalid register read; address = 0\n");
9784                 return 0;
9785         }
9786 #endif
9787
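        /*
         * While the DMUB register helper is gathering accesses for offload
         * (and not burst-writing), a read cannot be serviced: the gathered
         * sequence is executed later by the DMUB firmware and cannot return
         * a value here.
         */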
9788         if (ctx->dmub_srv &&
9789             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9790             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9791                 ASSERT(false);
9792                 return 0;
9793         }
9794
9795         value = cgs_read_register(ctx->cgs_device, address);
9796
9797         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9798
9799         return value;
9800 }