drm/amd/display: Free atomic state after drm_atomic_commit
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (linux-2.6-block.git)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

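/*
 * Layout of the DMUB firmware blob as consumed below by dm_dmub_sw_init()
 * and dm_dmub_hw_init() (a sketch inferred from the offset math there):
 *
 *   ucode_array_offset_bytes
 *   |
 *   v
 *   | PSP header | inst_const payload | PSP footer | bss/data |
 *
 * i.e. inst_const_bytes covers header + payload + footer, and the BSS/data
 * region starts inst_const_bytes past the ucode array offset.
 */
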
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

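/*
 * High-level flow (sketch): userspace commits enter through the DRM atomic
 * ioctl, are validated by amdgpu_dm_atomic_check() and are programmed to
 * hardware by amdgpu_dm_atomic_commit_tail(), which translates the committed
 * DRM state into DC stream/plane updates. Both entry points are declared
 * below.
 */
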
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

/**
 * dm_vblank_get_counter() - Get the counter of vertical blanks for a CRTC
 * @adev: [in] desired amdgpu device
 * @crtc: [in] index of the CRTC to get the counter from
 *
 * Return: counter of vertical blanks, or 0 if @crtc is out of range or has
 * no stream attached.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

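/*
 * Note: a CRTC counts as "VRR active" in both the variable state (adaptive
 * refresh) and the fixed state (refresh pinned within the VRR range), since
 * vblank handling and timestamping differ from the plain fixed-refresh path
 * in either case.
 */
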
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                                 amdgpu_crtc->pflip_status,
                                 AMDGPU_FLIP_SUBMITTED,
                                 amdgpu_crtc->crtc_id,
                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one is incremented at the start of the
         * vblank in which the pageflip completed, so last_flip_vblank is the
         * forbidden count for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                         amdgpu_crtc->crtc_id, amdgpu_crtc,
                         vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

                DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping only gives valid results
                 * once scanout is past the front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only then vblank timestamping will give
         * valid results while inside the front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r) {
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                } else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
                }
        }
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                          int pipe, bool *enabled,
                                          unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                       struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

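/*
 * Audio component glue: registering amdgpu_dm_audio_component_bind_ops lets
 * the HDA driver pull ELDs via .get_eld above; pin state is reset here at
 * init and the HDA side is poked on hotplug via amdgpu_dm_audio_eld_notify().
 */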
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

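/*
 * Bring up the DMUB service on hardware. A sketch of the sequence implemented
 * below: verify HW support, stage the firmware inst_const/bss and the VBIOS
 * into their pre-allocated framebuffer windows, clear the mailbox, tracebuff
 * and fw-state windows, then dmub_srv_hw_init() and wait for auto-load.
 */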
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                                fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
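/*
 * Collect the physical address space layout (system aperture, AGP window and
 * GART page table location) from GMC state and pack it into the
 * dc_phy_addr_space_config that DC consumes via dc_setup_system_context().
 */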
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that it is unable to use the vram which
                 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
                 * workaround that increases the system aperture high address
                 * (add 1) to get rid of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}
#endif

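/*
 * Core bring-up for the display manager: IRQ handling, the DC core
 * (dc_create()), DMUB, freesync and (optionally) HDCP, followed by creation
 * of the DRM device structures via amdgpu_dm_initialize_drm_device(). On any
 * failure we unwind through amdgpu_dm_fini().
 */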
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        int r;

        adev->dm.ddev = adev_to_drm(adev);
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        init_data.flags.disable_dmcu = true;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_VANGOGH:
                init_data.flags.gpu_vm_support = true;
                break;
#endif
        default:
                break;
        }

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
                adev->dm.dc->debug.force_single_disp_pipe_split = false;
                adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);

        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;

        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->apu_flags) {
                struct dc_phy_addr_space_config pa_config;

                mmhub_read_system_context(adev, &pa_config);

                /* Call the DC init_memory func */
                dc_setup_system_context(adev->dm.dc, &pa_config);
        }
#endif

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
        } else
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                adev->dm.freesync_module);

        amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
                DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
                goto error;
        }

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->dm.display_indexes_num; i++) {
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
        }

        amdgpu_dm_audio_fini(adev);

        amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif
        /* adev->dm.dc may still be NULL when we get here from the
         * amdgpu_dm_init() error path, so guard the dereference.
         */
        if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
                adev->dm.dc->ctx->dmub_srv = NULL;
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
        const char *fw_name_dmcu = NULL;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;

        switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        case CHIP_OLAND:
#endif
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_RENOIR:
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
        case CHIP_DIMGREY_CAVEFISH:
        case CHIP_VANGOGH:
                return 0;
        case CHIP_NAVI12:
                fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
                break;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
                    ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
                return 0;
        }

        r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
        if (r == -ENOENT) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
                        fw_name_dmcu);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
                return r;
        }

        hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

        return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
        struct amdgpu_device *adev = ctx;

        return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
                                     uint32_t value)
{
        struct amdgpu_device *adev = ctx;

        dm_write_reg(adev->dm.dc->ctx, address, value);
}

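/*
 * Software-side DMUB setup. A sketch of the steps below: pick the per-ASIC
 * firmware, validate it, create the dmub_srv instance, size all firmware
 * regions, back them with a single VRAM buffer object and compute the
 * per-window framebuffer info that dm_dmub_hw_init() consumes later.
 */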
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
        struct dmub_srv_create_params create_params;
        struct dmub_srv_region_params region_params;
        struct dmub_srv_region_info region_info;
        struct dmub_srv_fb_params fb_params;
        struct dmub_srv_fb_info *fb_info;
        struct dmub_srv *dmub_srv;
        const struct dmcub_firmware_header_v1_0 *hdr;
        const char *fw_name_dmub;
        enum dmub_asic dmub_asic;
        enum dmub_status status;
        int r;

        switch (adev->asic_type) {
        case CHIP_RENOIR:
                dmub_asic = DMUB_ASIC_DCN21;
                fw_name_dmub = FIRMWARE_RENOIR_DMUB;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
                break;
        case CHIP_SIENNA_CICHLID:
                dmub_asic = DMUB_ASIC_DCN30;
                fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
                break;
        case CHIP_NAVY_FLOUNDER:
                dmub_asic = DMUB_ASIC_DCN30;
                fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
                break;
        case CHIP_VANGOGH:
                dmub_asic = DMUB_ASIC_DCN301;
                fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
                break;
        case CHIP_DIMGREY_CAVEFISH:
                dmub_asic = DMUB_ASIC_DCN302;
                fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
                break;
        default:
                /* ASIC doesn't support DMUB. */
                return 0;
        }

        r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
        if (r) {
                DRM_ERROR("DMUB firmware loading failed: %d\n", r);
                return 0;
        }

        r = amdgpu_ucode_validate(adev->dm.dmub_fw);
        if (r) {
                DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

        /* Record the firmware version before it is logged below. */
        adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
                        AMDGPU_UCODE_ID_DMCUB;
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
                        adev->dm.dmub_fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

                DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
                         adev->dm.dmcub_fw_version);
        }

        adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
        dmub_srv = adev->dm.dmub_srv;

        if (!dmub_srv) {
                DRM_ERROR("Failed to allocate DMUB service!\n");
                return -ENOMEM;
        }

        memset(&create_params, 0, sizeof(create_params));
        create_params.user_ctx = adev;
        create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
        create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
        create_params.asic = dmub_asic;

        /* Create the DMUB service. */
        status = dmub_srv_create(dmub_srv, &create_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error creating DMUB service: %d\n", status);
                return -EINVAL;
        }

        /* Calculate the size of all the regions for the DMUB service. */
        memset(&region_params, 0, sizeof(region_params));

        region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                                        PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
        region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
        region_params.vbios_size = adev->bios_size;
        region_params.fw_bss_data = region_params.bss_data_size ?
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                le32_to_cpu(hdr->inst_const_bytes) : NULL;
        region_params.fw_inst_const =
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                PSP_HEADER_BYTES;

        status = dmub_srv_calc_region_info(dmub_srv, &region_params,
                                           &region_info);

        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB region info: %d\n", status);
                return -EINVAL;
        }

        /*
         * Allocate a framebuffer based on the total size of all the regions.
         * TODO: Move this into GART.
         */
        r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
                                    &adev->dm.dmub_bo_gpu_addr,
                                    &adev->dm.dmub_bo_cpu_addr);
        if (r)
                return r;

        /* Rebase the regions on the framebuffer address. */
        memset(&fb_params, 0, sizeof(fb_params));
        fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
        fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
        fb_params.region_info = &region_info;

        adev->dm.dmub_fb_info =
                kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
        fb_info = adev->dm.dmub_fb_info;

        if (!fb_info) {
                DRM_ERROR(
                        "Failed to allocate framebuffer info for DMUB service!\n");
                return -ENOMEM;
        }

        status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
                return -EINVAL;
        }

        return 0;
}
1436
1437 static int dm_sw_init(void *handle)
1438 {
1439         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1440         int r;
1441
1442         r = dm_dmub_sw_init(adev);
1443         if (r)
1444                 return r;
1445
1446         return load_dmcu_fw(adev);
1447 }
1448
1449 static int dm_sw_fini(void *handle)
1450 {
1451         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1452
1453         kfree(adev->dm.dmub_fb_info);
1454         adev->dm.dmub_fb_info = NULL;
1455
1456         if (adev->dm.dmub_srv) {
1457                 dmub_srv_destroy(adev->dm.dmub_srv);
1458                 adev->dm.dmub_srv = NULL;
1459         }
1460
1461         release_firmware(adev->dm.dmub_fw);
1462         adev->dm.dmub_fw = NULL;
1463
1464         release_firmware(adev->dm.fw_dmcu);
1465         adev->dm.fw_dmcu = NULL;
1466
1467         return 0;
1468 }
1469
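     /* (Re)start MST topology management on every connector whose DC link was
      * detected as an MST branch and has a usable AUX channel.
      */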
1470 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1471 {
1472         struct amdgpu_dm_connector *aconnector;
1473         struct drm_connector *connector;
1474         struct drm_connector_list_iter iter;
1475         int ret = 0;
1476
1477         drm_connector_list_iter_begin(dev, &iter);
1478         drm_for_each_connector_iter(connector, &iter) {
1479                 aconnector = to_amdgpu_dm_connector(connector);
1480                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1481                     aconnector->mst_mgr.aux) {
1482                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1483                                          aconnector,
1484                                          aconnector->base.base.id);
1485
1486                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1487                         if (ret < 0) {
1488                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1489                                 aconnector->dc_link->type =
1490                                         dc_connection_single;
1491                                 break;
1492                         }
1493                 }
1494         }
1495         drm_connector_list_iter_end(&iter);
1496
1497         return ret;
1498 }
1499
1500 static int dm_late_init(void *handle)
1501 {
1502         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1503
1504         struct dmcu_iram_parameters params;
1505         unsigned int linear_lut[16];
1506         int i;
1507         struct dmcu *dmcu = NULL;
1508         bool ret = true;
1509
1510         dmcu = adev->dm.dc->res_pool->dmcu;
1511
1512         for (i = 0; i < 16; i++)
1513                 linear_lut[i] = 0xFFFF * i / 15;
1514
1515         params.set = 0;
1516         params.backlight_ramping_start = 0xCCCC;
1517         params.backlight_ramping_reduction = 0xCCCCCCCC;
1518         params.backlight_lut_array_size = 16;
1519         params.backlight_lut_array = linear_lut;
1520
1521         /* Min backlight level after ABM reduction; don't allow below 1%:
1522          * 0xFFFF * 0.01 = 0x28F
1523          */
1524         params.min_abm_backlight = 0x28F;
1525
1526         /* In the case where ABM is implemented on DMCUB, the DMCU
1527          * object will be NULL.
1528          * ABM 2.4 and up are implemented on DMCUB.
1529          */
1530         if (dmcu)
1531                 ret = dmcu_load_iram(dmcu, params);
1532         else if (adev->dm.dc->ctx->dmub_srv)
1533                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1534
1535         if (!ret)
1536                 return -EINVAL;
1537
1538         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1539 }
1540
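     /* Suspend or resume the DP MST topology managers for all root MST
      * connectors. On a failed resume the topology is torn down and a hotplug
      * event is generated so userspace can re-probe the connector.
      */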
1541 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1542 {
1543         struct amdgpu_dm_connector *aconnector;
1544         struct drm_connector *connector;
1545         struct drm_connector_list_iter iter;
1546         struct drm_dp_mst_topology_mgr *mgr;
1547         int ret;
1548         bool need_hotplug = false;
1549
1550         drm_connector_list_iter_begin(dev, &iter);
1551         drm_for_each_connector_iter(connector, &iter) {
1552                 aconnector = to_amdgpu_dm_connector(connector);
1553                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1554                     aconnector->mst_port)
1555                         continue;
1556
1557                 mgr = &aconnector->mst_mgr;
1558
1559                 if (suspend) {
1560                         drm_dp_mst_topology_mgr_suspend(mgr);
1561                 } else {
1562                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1563                         if (ret < 0) {
1564                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1565                                 need_hotplug = true;
1566                         }
1567                 }
1568         }
1569         drm_connector_list_iter_end(&iter);
1570
1571         if (need_hotplug)
1572                 drm_kms_helper_hotplug_event(dev);
1573 }
1574
1575 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1576 {
1577         struct smu_context *smu = &adev->smu;
1578         int ret = 0;
1579
1580         if (!is_support_sw_smu(adev))
1581                 return 0;
1582
1583         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1584          * depends on the Windows driver's DC implementation.
1585          * For Navi1x, the clock settings of the DCN watermarks are fixed and
1586          * should be passed to the SMU during boot up and on resume from S3.
1587          * Boot up: DC calculates the DCN watermark clock settings within
1588          * dc_create / dcn20_resource_construct,
1589          * then calls the pplib functions below to pass the settings to the SMU:
1590          * smu_set_watermarks_for_clock_ranges
1591          * smu_set_watermarks_table
1592          * navi10_set_watermarks_table
1593          * smu_write_watermarks_table
1594          *
1595          * For Renoir, the clock settings of the DCN watermarks are also fixed
1596          * values. DC has implemented a different flow for the Windows driver:
1597          * dc_hardware_init / dc_set_power_state
1598          * dcn10_init_hw
1599          * notify_wm_ranges
1600          * set_wm_ranges
1601          * -- Linux
1602          * smu_set_watermarks_for_clock_ranges
1603          * renoir_set_watermarks_table
1604          * smu_write_watermarks_table
1605          *
1606          * On Linux,
1607          * dc_hardware_init -> amdgpu_dm_init
1608          * dc_set_power_state --> dm_resume
1609          *
1610          * Therefore, this function applies to Navi10/12/14 but not to Renoir.
1611          */
1612         switch (adev->asic_type) {
1614         case CHIP_NAVI10:
1615         case CHIP_NAVI14:
1616         case CHIP_NAVI12:
1617                 break;
1618         default:
1619                 return 0;
1620         }
1621
1622         ret = smu_write_watermarks_table(smu);
1623         if (ret) {
1624                 DRM_ERROR("Failed to update WMTABLE!\n");
1625                 return ret;
1626         }
1627
1628         return 0;
1629 }
1630
1631 /**
1632  * dm_hw_init() - Initialize DC device
1633  * @handle: The base driver device containing the amdgpu_dm device.
1634  *
1635  * Initialize the &struct amdgpu_display_manager device. This involves calling
1636  * the initializers of each DM component, then populating the struct with them.
1637  *
1638  * Although the function implies hardware initialization, both hardware and
1639  * software are initialized here. Splitting them out to their relevant init
1640  * hooks is a future TODO item.
1641  *
1642  * Some notable things that are initialized here:
1643  *
1644  * - Display Core, both software and hardware
1645  * - DC modules that we need (freesync and color management)
1646  * - DRM software states
1647  * - Interrupt sources and handlers
1648  * - Vblank support
1649  * - Debug FS entries, if enabled
1650  */
1651 static int dm_hw_init(void *handle)
1652 {
1653         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1654         /* Create DAL display manager */
1655         amdgpu_dm_init(adev);
1656         amdgpu_dm_hpd_init(adev);
1657
1658         return 0;
1659 }
1660
1661 /**
1662  * dm_hw_fini() - Teardown DC device
1663  * @handle: The base driver device containing the amdgpu_dm device.
1664  *
1665  * Teardown components within &struct amdgpu_display_manager that require
1666  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1667  * were loaded. Also flush IRQ workqueues and disable them.
1668  */
1669 static int dm_hw_fini(void *handle)
1670 {
1671         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1672
1673         amdgpu_dm_hpd_fini(adev);
1674
1675         amdgpu_dm_irq_fini(adev);
1676         amdgpu_dm_fini(adev);
1677         return 0;
1678 }
1679
1680
1681 static int dm_enable_vblank(struct drm_crtc *crtc);
1682 static void dm_disable_vblank(struct drm_crtc *crtc);
1683
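     /* Enable or disable the pflip and vblank interrupts of every CRTC that is
      * driving a stream with planes in the given DC state. Used to quiesce and
      * restore display IRQs across GPU reset.
      */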
1684 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1685                                  struct dc_state *state, bool enable)
1686 {
1687         enum dc_irq_source irq_source;
1688         struct amdgpu_crtc *acrtc;
1689         int rc = -EBUSY;
1690         int i = 0;
1691
1692         for (i = 0; i < state->stream_count; i++) {
1693                 acrtc = get_crtc_by_otg_inst(
1694                                 adev, state->stream_status[i].primary_otg_inst);
1695
1696                 if (acrtc && state->stream_status[i].plane_count != 0) {
1697                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1698                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1699                         DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1700                                   acrtc->crtc_id, enable ? "en" : "dis", rc);
1701                         if (rc)
1702                                 DRM_WARN("Failed to %s pflip interrupts\n",
1703                                          enable ? "enable" : "disable");
1704
1705                         if (enable) {
1706                                 rc = dm_enable_vblank(&acrtc->base);
1707                                 if (rc)
1708                                         DRM_WARN("Failed to enable vblank interrupts\n");
1709                         } else {
1710                                 dm_disable_vblank(&acrtc->base);
1711                         }
1712
1713                 }
1714         }
1715
1716 }
1717
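     /* Commit a copy of the current DC state with all streams (and their
      * planes) removed, leaving the hardware driving zero streams. Used on
      * suspend while a GPU reset is in progress.
      */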
1718 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1719 {
1720         struct dc_state *context = NULL;
1721         enum dc_status res = DC_ERROR_UNEXPECTED;
1722         int i;
1723         struct dc_stream_state *del_streams[MAX_PIPES];
1724         int del_streams_count = 0;
1725
1726         memset(del_streams, 0, sizeof(del_streams));
1727
1728         context = dc_create_state(dc);
1729         if (context == NULL)
1730                 goto context_alloc_fail;
1731
1732         dc_resource_state_copy_construct_current(dc, context);
1733
1734         /* First remove from context all streams */
1735         for (i = 0; i < context->stream_count; i++) {
1736                 struct dc_stream_state *stream = context->streams[i];
1737
1738                 del_streams[del_streams_count++] = stream;
1739         }
1740
1741         /* Remove all planes for removed streams and then remove the streams */
1742         for (i = 0; i < del_streams_count; i++) {
1743                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1744                         res = DC_FAIL_DETACH_SURFACES;
1745                         goto fail;
1746                 }
1747
1748                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1749                 if (res != DC_OK)
1750                         goto fail;
1751         }
1752
1753
1754         res = dc_validate_global_state(dc, context, false);
1755
1756         if (res != DC_OK) {
1757                 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1758                 goto fail;
1759         }
1760
1761         res = dc_commit_state(dc, context);
1762
1763 fail:
1764         dc_release_state(context);
1765
1766 context_alloc_fail:
1767         return res;
1768 }
1769
1770 static int dm_suspend(void *handle)
1771 {
1772         struct amdgpu_device *adev = handle;
1773         struct amdgpu_display_manager *dm = &adev->dm;
1774         int ret = 0;
1775
1776         if (amdgpu_in_reset(adev)) {
1777                 mutex_lock(&dm->dc_lock);
1778
1779 #if defined(CONFIG_DRM_AMD_DC_DCN)
1780                 dc_allow_idle_optimizations(adev->dm.dc, false);
1781 #endif
1782
1783                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1784
1785                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1786
1787                 amdgpu_dm_commit_zero_streams(dm->dc);
1788
1789                 amdgpu_dm_irq_suspend(adev);
1790
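                     /*
                      * Note: dc_lock is intentionally left held here; the
                      * GPU-reset path of dm_resume() releases it once the
                      * cached DC state has been re-committed.
                      */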
1791                 return ret;
1792         }
1793
1794         WARN_ON(adev->dm.cached_state);
1795         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1796
1797         s3_handle_mst(adev_to_drm(adev), true);
1798
1799         amdgpu_dm_irq_suspend(adev);
1800
1801
1802         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1803
1804         return 0;
1805 }
1806
1807 static struct amdgpu_dm_connector *
1808 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1809                                              struct drm_crtc *crtc)
1810 {
1811         uint32_t i;
1812         struct drm_connector_state *new_con_state;
1813         struct drm_connector *connector;
1814         struct drm_crtc *crtc_from_state;
1815
1816         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1817                 crtc_from_state = new_con_state->crtc;
1818
1819                 if (crtc_from_state == crtc)
1820                         return to_amdgpu_dm_connector(connector);
1821         }
1822
1823         return NULL;
1824 }
1825
1826 static void emulated_link_detect(struct dc_link *link)
1827 {
1828         struct dc_sink_init_data sink_init_data = { 0 };
1829         struct display_sink_capability sink_caps = { 0 };
1830         enum dc_edid_status edid_status;
1831         struct dc_context *dc_ctx = link->ctx;
1832         struct dc_sink *sink = NULL;
1833         struct dc_sink *prev_sink = NULL;
1834
1835         link->type = dc_connection_none;
1836         prev_sink = link->local_sink;
1837
1838         if (prev_sink)
1839                 dc_sink_release(prev_sink);
1840
1841         switch (link->connector_signal) {
1842         case SIGNAL_TYPE_HDMI_TYPE_A: {
1843                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1844                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1845                 break;
1846         }
1847
1848         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1849                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1850                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1851                 break;
1852         }
1853
1854         case SIGNAL_TYPE_DVI_DUAL_LINK: {
1855                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1856                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1857                 break;
1858         }
1859
1860         case SIGNAL_TYPE_LVDS: {
1861                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1862                 sink_caps.signal = SIGNAL_TYPE_LVDS;
1863                 break;
1864         }
1865
1866         case SIGNAL_TYPE_EDP: {
1867                 sink_caps.transaction_type =
1868                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1869                 sink_caps.signal = SIGNAL_TYPE_EDP;
1870                 break;
1871         }
1872
1873         case SIGNAL_TYPE_DISPLAY_PORT: {
1874                 sink_caps.transaction_type =
1875                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1876                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1877                 break;
1878         }
1879
1880         default:
1881                 DC_ERROR("Invalid connector type! signal:%d\n",
1882                         link->connector_signal);
1883                 return;
1884         }
1885
1886         sink_init_data.link = link;
1887         sink_init_data.sink_signal = sink_caps.signal;
1888
1889         sink = dc_sink_create(&sink_init_data);
1890         if (!sink) {
1891                 DC_ERROR("Failed to create sink!\n");
1892                 return;
1893         }
1894
1895         /* dc_sink_create returns a new reference */
1896         link->local_sink = sink;
1897
1898         edid_status = dm_helpers_read_local_edid(
1899                         link->ctx,
1900                         link,
1901                         sink);
1902
1903         if (edid_status != EDID_OK)
1904                 DC_ERROR("Failed to read EDID\n");
1905
1906 }
1907
1908 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1909                                      struct amdgpu_display_manager *dm)
1910 {
1911         struct {
1912                 struct dc_surface_update surface_updates[MAX_SURFACES];
1913                 struct dc_plane_info plane_infos[MAX_SURFACES];
1914                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1915                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1916                 struct dc_stream_update stream_update;
1917         } *bundle;
1918         int k, m;
1919
1920         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1921
1922         if (!bundle) {
1923                 dm_error("Failed to allocate update bundle\n");
1924                 goto cleanup;
1925         }
1926
1927         for (k = 0; k < dc_state->stream_count; k++) {
1928                 bundle->stream_update.stream = dc_state->streams[k];
1929
1930                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
1931                         bundle->surface_updates[m].surface =
1932                                 dc_state->stream_status[k].plane_states[m];
1933                         bundle->surface_updates[m].surface->force_full_update =
1934                                 true;
1935                 }
1936                 dc_commit_updates_for_stream(
1937                         dm->dc, bundle->surface_updates,
1938                         dc_state->stream_status[k].plane_count,
1939                         dc_state->streams[k], &bundle->stream_update);
1940         }
1941
1942 cleanup:
1943         kfree(bundle);
1944
1945         return;
1946 }
1947
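     /* Blank the display on the given link by committing a stream update with
      * dpms_off set, leaving the rest of the stream state untouched.
      */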
1948 static void dm_set_dpms_off(struct dc_link *link)
1949 {
1950         struct dc_stream_state *stream_state;
1951         struct amdgpu_dm_connector *aconnector = link->priv;
1952         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1953         struct dc_stream_update stream_update;
1954         bool dpms_off = true;
1955
1956         memset(&stream_update, 0, sizeof(stream_update));
1957         stream_update.dpms_off = &dpms_off;
1958
1959         mutex_lock(&adev->dm.dc_lock);
1960         stream_state = dc_stream_find_from_link(link);
1961
1962         if (stream_state == NULL) {
1963                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
1964                 mutex_unlock(&adev->dm.dc_lock);
1965                 return;
1966         }
1967
1968         stream_update.stream = stream_state;
1969         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
1970                                      stream_state, &stream_update);
1971         mutex_unlock(&adev->dm.dc_lock);
1972 }
1973
1974 static int dm_resume(void *handle)
1975 {
1976         struct amdgpu_device *adev = handle;
1977         struct drm_device *ddev = adev_to_drm(adev);
1978         struct amdgpu_display_manager *dm = &adev->dm;
1979         struct amdgpu_dm_connector *aconnector;
1980         struct drm_connector *connector;
1981         struct drm_connector_list_iter iter;
1982         struct drm_crtc *crtc;
1983         struct drm_crtc_state *new_crtc_state;
1984         struct dm_crtc_state *dm_new_crtc_state;
1985         struct drm_plane *plane;
1986         struct drm_plane_state *new_plane_state;
1987         struct dm_plane_state *dm_new_plane_state;
1988         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1989         enum dc_connection_type new_connection_type = dc_connection_none;
1990         struct dc_state *dc_state;
1991         int i, r, j;
1992
1993         if (amdgpu_in_reset(adev)) {
1994                 dc_state = dm->cached_dc_state;
1995
1996                 r = dm_dmub_hw_init(adev);
1997                 if (r)
1998                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1999
2000                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2001                 dc_resume(dm->dc);
2002
2003                 amdgpu_dm_irq_resume_early(adev);
2004
2005                 for (i = 0; i < dc_state->stream_count; i++) {
2006                         dc_state->streams[i]->mode_changed = true;
2007                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2008                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2009                                         = 0xffffffff;
2010                         }
2011                 }
2012
2013                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2014
2015                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2016
2017                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2018
2019                 dc_release_state(dm->cached_dc_state);
2020                 dm->cached_dc_state = NULL;
2021
2022                 amdgpu_dm_irq_resume_late(adev);
2023
2024                 mutex_unlock(&dm->dc_lock);
2025
2026                 return 0;
2027         }
2028         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2029         dc_release_state(dm_state->context);
2030         dm_state->context = dc_create_state(dm->dc);
2031         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2032         dc_resource_state_construct(dm->dc, dm_state->context);
2033
2034         /* Before powering on DC we need to re-initialize DMUB. */
2035         r = dm_dmub_hw_init(adev);
2036         if (r)
2037                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2038
2039         /* power on hardware */
2040         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2041
2042         /* program HPD filter */
2043         dc_resume(dm->dc);
2044
2045         /*
2046          * Enable HPD Rx IRQ early; this should be done before the mode is
2047          * set, since short-pulse interrupts are used for MST.
2048          */
2049         amdgpu_dm_irq_resume_early(adev);
2050
2051         /* On resume we need to rewrite the MSTM control bits to enable MST */
2052         s3_handle_mst(ddev, false);
2053
2054         /* Do detection */
2055         drm_connector_list_iter_begin(ddev, &iter);
2056         drm_for_each_connector_iter(connector, &iter) {
2057                 aconnector = to_amdgpu_dm_connector(connector);
2058
2059                 /*
2060                  * This is the case when traversing through already-created
2061                  * MST connectors; they should be skipped.
2062                  */
2063                 if (aconnector->mst_port)
2064                         continue;
2065
2066                 mutex_lock(&aconnector->hpd_lock);
2067                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2068                         DRM_ERROR("KMS: Failed to detect connector\n");
2069
2070                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2071                         emulated_link_detect(aconnector->dc_link);
2072                 else
2073                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2074
2075                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2076                         aconnector->fake_enable = false;
2077
2078                 if (aconnector->dc_sink)
2079                         dc_sink_release(aconnector->dc_sink);
2080                 aconnector->dc_sink = NULL;
2081                 amdgpu_dm_update_connector_after_detect(aconnector);
2082                 mutex_unlock(&aconnector->hpd_lock);
2083         }
2084         drm_connector_list_iter_end(&iter);
2085
2086         /* Force mode set in atomic commit */
2087         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2088                 new_crtc_state->active_changed = true;
2089
2090         /*
2091          * atomic_check is expected to create the dc states. We need to release
2092          * them here, since they were duplicated as part of the suspend
2093          * procedure.
2094          */
2095         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2096                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2097                 if (dm_new_crtc_state->stream) {
2098                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2099                         dc_stream_release(dm_new_crtc_state->stream);
2100                         dm_new_crtc_state->stream = NULL;
2101                 }
2102         }
2103
2104         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2105                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2106                 if (dm_new_plane_state->dc_state) {
2107                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2108                         dc_plane_state_release(dm_new_plane_state->dc_state);
2109                         dm_new_plane_state->dc_state = NULL;
2110                 }
2111         }
2112
2113         drm_atomic_helper_resume(ddev, dm->cached_state);
2114
2115         dm->cached_state = NULL;
2116
2117         amdgpu_dm_irq_resume_late(adev);
2118
2119         amdgpu_dm_smu_write_watermarks_table(adev);
2120
2121         return 0;
2122 }
2123
2124 /**
2125  * DOC: DM Lifecycle
2126  *
2127  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2128  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2129  * the base driver's device list to be initialized and torn down accordingly.
2130  *
2131  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2132  */
2133
2134 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2135         .name = "dm",
2136         .early_init = dm_early_init,
2137         .late_init = dm_late_init,
2138         .sw_init = dm_sw_init,
2139         .sw_fini = dm_sw_fini,
2140         .hw_init = dm_hw_init,
2141         .hw_fini = dm_hw_fini,
2142         .suspend = dm_suspend,
2143         .resume = dm_resume,
2144         .is_idle = dm_is_idle,
2145         .wait_for_idle = dm_wait_for_idle,
2146         .check_soft_reset = dm_check_soft_reset,
2147         .soft_reset = dm_soft_reset,
2148         .set_clockgating_state = dm_set_clockgating_state,
2149         .set_powergating_state = dm_set_powergating_state,
2150 };
2151
2152 const struct amdgpu_ip_block_version dm_ip_block =
2153 {
2154         .type = AMD_IP_BLOCK_TYPE_DCE,
2155         .major = 1,
2156         .minor = 0,
2157         .rev = 0,
2158         .funcs = &amdgpu_dm_funcs,
2159 };
2160
2161
2162 /**
2163  * DOC: atomic
2164  *
2165  * *WIP*
2166  */
2167
2168 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2169         .fb_create = amdgpu_display_user_framebuffer_create,
2170         .get_format_info = amd_get_format_info,
2171         .output_poll_changed = drm_fb_helper_output_poll_changed,
2172         .atomic_check = amdgpu_dm_atomic_check,
2173         .atomic_commit = drm_atomic_helper_commit,
2174 };
2175
2176 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2177         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2178 };
2179
2180 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2181 {
2182         u32 max_cll, min_cll, max, min, q, r;
2183         struct amdgpu_dm_backlight_caps *caps;
2184         struct amdgpu_display_manager *dm;
2185         struct drm_connector *conn_base;
2186         struct amdgpu_device *adev;
2187         struct dc_link *link = NULL;
2188         static const u8 pre_computed_values[] = {
2189                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2190                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2191
2192         if (!aconnector || !aconnector->dc_link)
2193                 return;
2194
2195         link = aconnector->dc_link;
2196         if (link->connector_signal != SIGNAL_TYPE_EDP)
2197                 return;
2198
2199         conn_base = &aconnector->base;
2200         adev = drm_to_adev(conn_base->dev);
2201         dm = &adev->dm;
2202         caps = &dm->backlight_caps;
2203         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2204         caps->aux_support = false;
2205         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2206         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2207
2208         if (caps->ext_caps->bits.oled == 1 ||
2209             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2210             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2211                 caps->aux_support = true;
2212
2213         /* From the specification (CTA-861-G), for calculating the maximum
2214          * luminance we need to use:
2215          *      Luminance = 50*2**(CV/32)
2216          * Where CV is a one-byte value.
2217          * Calculating this expression directly may need floating-point
2218          * precision; to avoid that complexity, we take advantage of the fact
2219          * that CV is divided by a constant. From Euclid's division algorithm,
2220          * we know that CV can be written as: CV = 32*q + r. Replacing CV in
2221          * the Luminance expression gives 50*(2**q)*(2**(r/32)), hence we just
2222          * need to pre-compute the values of 50*(2**(r/32)). For pre-computing
2223          * those values we used the following Ruby line:
2224          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2225          * The results of the above expression can be verified in
2226          * pre_computed_values.
2227          */
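             /*
              * Worked example (illustrative value, not taken from any real
              * panel): for max_cll = 70, q = 70 >> 5 = 2 and r = 70 % 32 = 6,
              * so max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228 nits,
              * which matches 50 * 2**(70/32) ~= 227.8 from the formula above.
              */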
2228         q = max_cll >> 5;
2229         r = max_cll % 32;
2230         max = (1 << q) * pre_computed_values[r];
2231
2232         // min luminance: maxLum * (CV/255)^2 / 100
2233         q = DIV_ROUND_CLOSEST(min_cll, 255);
2234         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2235
2236         caps->aux_max_input_signal = max;
2237         caps->aux_min_input_signal = min;
2238 }
2239
2240 void amdgpu_dm_update_connector_after_detect(
2241                 struct amdgpu_dm_connector *aconnector)
2242 {
2243         struct drm_connector *connector = &aconnector->base;
2244         struct drm_device *dev = connector->dev;
2245         struct dc_sink *sink;
2246
2247         /* MST handled by drm_mst framework */
2248         if (aconnector->mst_mgr.mst_state)
2249                 return;
2250
2251         sink = aconnector->dc_link->local_sink;
2252         if (sink)
2253                 dc_sink_retain(sink);
2254
2255         /*
2256          * An EDID-managed connector gets its first update only in the mode_valid
2257          * hook; the connector sink is then set to either a fake or a physical
2258          * sink, depending on the link status. Skip if already done during boot.
2259          */
2260         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2261                         && aconnector->dc_em_sink) {
2262
2263                 /*
2264                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2265                  * fake a stream, because on resume connector->sink is set to NULL.
2266                  */
2267                 mutex_lock(&dev->mode_config.mutex);
2268
2269                 if (sink) {
2270                         if (aconnector->dc_sink) {
2271                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2272                                 /*
2273                                  * Retain and release below are used to bump up the
2274                                  * refcount for the sink: the link no longer points to
2275                                  * it after disconnect, so on the next crtc-to-connector
2276                                  * reshuffle by UMD we would otherwise see an unwanted dc_sink release.
2277                                  */
2278                                 dc_sink_release(aconnector->dc_sink);
2279                         }
2280                         aconnector->dc_sink = sink;
2281                         dc_sink_retain(aconnector->dc_sink);
2282                         amdgpu_dm_update_freesync_caps(connector,
2283                                         aconnector->edid);
2284                 } else {
2285                         amdgpu_dm_update_freesync_caps(connector, NULL);
2286                         if (!aconnector->dc_sink) {
2287                                 aconnector->dc_sink = aconnector->dc_em_sink;
2288                                 dc_sink_retain(aconnector->dc_sink);
2289                         }
2290                 }
2291
2292                 mutex_unlock(&dev->mode_config.mutex);
2293
2294                 if (sink)
2295                         dc_sink_release(sink);
2296                 return;
2297         }
2298
2299         /*
2300          * TODO: temporary guard while a proper fix is found.
2301          * If this sink is an MST sink, we should not do anything.
2302          */
2303         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2304                 dc_sink_release(sink);
2305                 return;
2306         }
2307
2308         if (aconnector->dc_sink == sink) {
2309                 /*
2310                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2311                  * Do nothing!!
2312                  */
2313                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2314                                 aconnector->connector_id);
2315                 if (sink)
2316                         dc_sink_release(sink);
2317                 return;
2318         }
2319
2320         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2321                 aconnector->connector_id, aconnector->dc_sink, sink);
2322
2323         mutex_lock(&dev->mode_config.mutex);
2324
2325         /*
2326          * 1. Update status of the drm connector
2327          * 2. Send an event and let userspace tell us what to do
2328          */
2329         if (sink) {
2330                 /*
2331                  * TODO: check if we still need the S3 mode update workaround.
2332                  * If yes, put it here.
2333                  */
2334                 if (aconnector->dc_sink)
2335                         amdgpu_dm_update_freesync_caps(connector, NULL);
2336
2337                 aconnector->dc_sink = sink;
2338                 dc_sink_retain(aconnector->dc_sink);
2339                 if (sink->dc_edid.length == 0) {
2340                         aconnector->edid = NULL;
2341                         if (aconnector->dc_link->aux_mode) {
2342                                 drm_dp_cec_unset_edid(
2343                                         &aconnector->dm_dp_aux.aux);
2344                         }
2345                 } else {
2346                         aconnector->edid =
2347                                 (struct edid *)sink->dc_edid.raw_edid;
2348
2349                         drm_connector_update_edid_property(connector,
2350                                                            aconnector->edid);
2351                         if (aconnector->dc_link->aux_mode)
2352                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2353                                                     aconnector->edid);
2354                 }
2355
2356                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2357                 update_connector_ext_caps(aconnector);
2358         } else {
2359                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2360                 amdgpu_dm_update_freesync_caps(connector, NULL);
2361                 drm_connector_update_edid_property(connector, NULL);
2362                 aconnector->num_modes = 0;
2363                 dc_sink_release(aconnector->dc_sink);
2364                 aconnector->dc_sink = NULL;
2365                 aconnector->edid = NULL;
2366 #ifdef CONFIG_DRM_AMD_DC_HDCP
2367                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2368                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2369                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2370 #endif
2371         }
2372
2373         mutex_unlock(&dev->mode_config.mutex);
2374
2375         update_subconnector_property(aconnector);
2376
2377         if (sink)
2378                 dc_sink_release(sink);
2379 }
2380
2381 static void handle_hpd_irq(void *param)
2382 {
2383         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2384         struct drm_connector *connector = &aconnector->base;
2385         struct drm_device *dev = connector->dev;
2386         enum dc_connection_type new_connection_type = dc_connection_none;
2387 #ifdef CONFIG_DRM_AMD_DC_HDCP
2388         struct amdgpu_device *adev = drm_to_adev(dev);
2389         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2390 #endif
2391
2392         /*
2393          * In case of failure or MST, no need to update the connector status or
2394          * notify the OS, since (in the MST case) MST does this in its own context.
2395          */
2396         mutex_lock(&aconnector->hpd_lock);
2397
2398 #ifdef CONFIG_DRM_AMD_DC_HDCP
2399         if (adev->dm.hdcp_workqueue) {
2400                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2401                 dm_con_state->update_hdcp = true;
2402         }
2403 #endif
2404         if (aconnector->fake_enable)
2405                 aconnector->fake_enable = false;
2406
2407         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2408                 DRM_ERROR("KMS: Failed to detect connector\n");
2409
2410         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2411                 emulated_link_detect(aconnector->dc_link);
2412
2413
2414                 drm_modeset_lock_all(dev);
2415                 dm_restore_drm_connector_state(dev, connector);
2416                 drm_modeset_unlock_all(dev);
2417
2418                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2419                         drm_kms_helper_hotplug_event(dev);
2420
2421         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2422                 if (new_connection_type == dc_connection_none &&
2423                     aconnector->dc_link->type == dc_connection_none)
2424                         dm_set_dpms_off(aconnector->dc_link);
2425
2426                 amdgpu_dm_update_connector_after_detect(aconnector);
2427
2428                 drm_modeset_lock_all(dev);
2429                 dm_restore_drm_connector_state(dev, connector);
2430                 drm_modeset_unlock_all(dev);
2431
2432                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2433                         drm_kms_helper_hotplug_event(dev);
2434         }
2435         mutex_unlock(&aconnector->hpd_lock);
2436
2437 }
2438
2439 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2440 {
2441         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2442         uint8_t dret;
2443         bool new_irq_handled = false;
2444         int dpcd_addr;
2445         int dpcd_bytes_to_read;
2446
2447         const int max_process_count = 30;
2448         int process_count = 0;
2449
2450         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2451
2452         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2453                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2454                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2455                 dpcd_addr = DP_SINK_COUNT;
2456         } else {
2457                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2458                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2459                 dpcd_addr = DP_SINK_COUNT_ESI;
2460         }
2461
2462         dret = drm_dp_dpcd_read(
2463                 &aconnector->dm_dp_aux.aux,
2464                 dpcd_addr,
2465                 esi,
2466                 dpcd_bytes_to_read);
2467
2468         while (dret == dpcd_bytes_to_read &&
2469                 process_count < max_process_count) {
2470                 uint8_t retry;
2471                 dret = 0;
2472
2473                 process_count++;
2474
2475                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2476                 /* handle HPD short pulse irq */
2477                 if (aconnector->mst_mgr.mst_state)
2478                         drm_dp_mst_hpd_irq(
2479                                 &aconnector->mst_mgr,
2480                                 esi,
2481                                 &new_irq_handled);
2482
2483                 if (new_irq_handled) {
2484                         /* ACK at DPCD to notify downstream */
2485                         const int ack_dpcd_bytes_to_write =
2486                                 dpcd_bytes_to_read - 1;
2487
2488                         for (retry = 0; retry < 3; retry++) {
2489                                 uint8_t wret;
2490
2491                                 wret = drm_dp_dpcd_write(
2492                                         &aconnector->dm_dp_aux.aux,
2493                                         dpcd_addr + 1,
2494                                         &esi[1],
2495                                         ack_dpcd_bytes_to_write);
2496                                 if (wret == ack_dpcd_bytes_to_write)
2497                                         break;
2498                         }
2499
2500                         /* check if there is new irq to be handled */
2501                         dret = drm_dp_dpcd_read(
2502                                 &aconnector->dm_dp_aux.aux,
2503                                 dpcd_addr,
2504                                 esi,
2505                                 dpcd_bytes_to_read);
2506
2507                         new_irq_handled = false;
2508                 } else {
2509                         break;
2510                 }
2511         }
2512
2513         if (process_count == max_process_count)
2514                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2515 }
2516
2517 static void handle_hpd_rx_irq(void *param)
2518 {
2519         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2520         struct drm_connector *connector = &aconnector->base;
2521         struct drm_device *dev = connector->dev;
2522         struct dc_link *dc_link = aconnector->dc_link;
2523         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2524         bool result = false;
2525         enum dc_connection_type new_connection_type = dc_connection_none;
2526         struct amdgpu_device *adev = drm_to_adev(dev);
2527         union hpd_irq_data hpd_irq_data;
2528
2529         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2530
2531         /*
2532          * TODO: Temporarily take a mutex so that the HPD interrupt does not
2533          * hit a GPIO conflict; once an i2c helper is implemented, this mutex
2534          * should be retired.
2535          */
2536         if (dc_link->type != dc_connection_mst_branch)
2537                 mutex_lock(&aconnector->hpd_lock);
2538
2539         read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2540
2541         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2542                 (dc_link->type == dc_connection_mst_branch)) {
2543                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2544                         result = true;
2545                         dm_handle_hpd_rx_irq(aconnector);
2546                         goto out;
2547                 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2548                         result = false;
2549                         dm_handle_hpd_rx_irq(aconnector);
2550                         goto out;
2551                 }
2552         }
2553
2554         mutex_lock(&adev->dm.dc_lock);
2555 #ifdef CONFIG_DRM_AMD_DC_HDCP
2556         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2557 #else
2558         result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2559 #endif
2560         mutex_unlock(&adev->dm.dc_lock);
2561
2562 out:
2563         if (result && !is_mst_root_connector) {
2564                 /* Downstream Port status changed. */
2565                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2566                         DRM_ERROR("KMS: Failed to detect connector\n");
2567
2568                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2569                         emulated_link_detect(dc_link);
2570
2571                         if (aconnector->fake_enable)
2572                                 aconnector->fake_enable = false;
2573
2574                         amdgpu_dm_update_connector_after_detect(aconnector);
2575
2576
2577                         drm_modeset_lock_all(dev);
2578                         dm_restore_drm_connector_state(dev, connector);
2579                         drm_modeset_unlock_all(dev);
2580
2581                         drm_kms_helper_hotplug_event(dev);
2582                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2583
2584                         if (aconnector->fake_enable)
2585                                 aconnector->fake_enable = false;
2586
2587                         amdgpu_dm_update_connector_after_detect(aconnector);
2588
2589
2590                         drm_modeset_lock_all(dev);
2591                         dm_restore_drm_connector_state(dev, connector);
2592                         drm_modeset_unlock_all(dev);
2593
2594                         drm_kms_helper_hotplug_event(dev);
2595                 }
2596         }
2597 #ifdef CONFIG_DRM_AMD_DC_HDCP
2598         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2599                 if (adev->dm.hdcp_workqueue)
2600                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2601         }
2602 #endif
2603
2604         if (dc_link->type != dc_connection_mst_branch) {
2605                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2606                 mutex_unlock(&aconnector->hpd_lock);
2607         }
2608 }
2609
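     /* Walk the connector list and hook each DC link's HPD and HPD RX
      * (DP short-pulse) interrupt sources up to the handlers above.
      */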
2610 static void register_hpd_handlers(struct amdgpu_device *adev)
2611 {
2612         struct drm_device *dev = adev_to_drm(adev);
2613         struct drm_connector *connector;
2614         struct amdgpu_dm_connector *aconnector;
2615         const struct dc_link *dc_link;
2616         struct dc_interrupt_params int_params = {0};
2617
2618         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2619         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2620
2621         list_for_each_entry(connector,
2622                         &dev->mode_config.connector_list, head) {
2623
2624                 aconnector = to_amdgpu_dm_connector(connector);
2625                 dc_link = aconnector->dc_link;
2626
2627                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2628                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2629                         int_params.irq_source = dc_link->irq_source_hpd;
2630
2631                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2632                                         handle_hpd_irq,
2633                                         (void *) aconnector);
2634                 }
2635
2636                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2637
2638                         /* Also register for DP short pulse (hpd_rx). */
2639                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2640                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2641
2642                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2643                                         handle_hpd_rx_irq,
2644                                         (void *) aconnector);
2645                 }
2646         }
2647 }
2648
2649 #if defined(CONFIG_DRM_AMD_DC_SI)
2650 /* Register IRQ sources and initialize IRQ callbacks */
2651 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2652 {
2653         struct dc *dc = adev->dm.dc;
2654         struct common_irq_params *c_irq_params;
2655         struct dc_interrupt_params int_params = {0};
2656         int r;
2657         int i;
2658         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2659
2660         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2661         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2662
2663         /*
2664          * Actions of amdgpu_irq_add_id():
2665          * 1. Register a set() function with base driver.
2666          *    Base driver will call set() function to enable/disable an
2667          *    interrupt in DC hardware.
2668          * 2. Register amdgpu_dm_irq_handler().
2669          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2670          *    coming from DC hardware.
2671          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2672          *    for acknowledging and handling. */
2673
2674         /* Use VBLANK interrupt */
2675         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2676                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2677                 if (r) {
2678                         DRM_ERROR("Failed to add crtc irq id!\n");
2679                         return r;
2680                 }
2681
2682                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2683                 int_params.irq_source =
2684                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2685
2686                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2687
2688                 c_irq_params->adev = adev;
2689                 c_irq_params->irq_src = int_params.irq_source;
2690
2691                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2692                                 dm_crtc_high_irq, c_irq_params);
2693         }
2694
2695         /* Use GRPH_PFLIP interrupt */
2696         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2697                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2698                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2699                 if (r) {
2700                         DRM_ERROR("Failed to add page flip irq id!\n");
2701                         return r;
2702                 }
2703
2704                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2705                 int_params.irq_source =
2706                         dc_interrupt_to_irq_source(dc, i, 0);
2707
2708                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2709
2710                 c_irq_params->adev = adev;
2711                 c_irq_params->irq_src = int_params.irq_source;
2712
2713                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2714                                 dm_pflip_high_irq, c_irq_params);
2715
2716         }
2717
2718         /* HPD */
2719         r = amdgpu_irq_add_id(adev, client_id,
2720                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2721         if (r) {
2722                 DRM_ERROR("Failed to add hpd irq id!\n");
2723                 return r;
2724         }
2725
2726         register_hpd_handlers(adev);
2727
2728         return 0;
2729 }
2730 #endif
2731
2732 /* Register IRQ sources and initialize IRQ callbacks */
2733 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2734 {
2735         struct dc *dc = adev->dm.dc;
2736         struct common_irq_params *c_irq_params;
2737         struct dc_interrupt_params int_params = {0};
2738         int r;
2739         int i;
2740         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2741
2742         if (adev->asic_type >= CHIP_VEGA10)
2743                 client_id = SOC15_IH_CLIENTID_DCE;
2744
2745         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2746         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2747
2748         /*
2749          * Actions of amdgpu_irq_add_id():
2750          * 1. Register a set() function with base driver.
2751          *    Base driver will call set() function to enable/disable an
2752          *    interrupt in DC hardware.
2753          * 2. Register amdgpu_dm_irq_handler().
2754          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2755          *    coming from DC hardware.
2756          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2757          *    for acknowledging and handling. */
2758
2759         /* Use VBLANK interrupt */
2760         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2761                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2762                 if (r) {
2763                         DRM_ERROR("Failed to add crtc irq id!\n");
2764                         return r;
2765                 }
2766
2767                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2768                 int_params.irq_source =
2769                         dc_interrupt_to_irq_source(dc, i, 0);
2770
2771                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2772
2773                 c_irq_params->adev = adev;
2774                 c_irq_params->irq_src = int_params.irq_source;
2775
2776                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2777                                 dm_crtc_high_irq, c_irq_params);
2778         }
2779
2780         /* Use VUPDATE interrupt */
2781         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2782                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2783                 if (r) {
2784                         DRM_ERROR("Failed to add vupdate irq id!\n");
2785                         return r;
2786                 }
2787
2788                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2789                 int_params.irq_source =
2790                         dc_interrupt_to_irq_source(dc, i, 0);
2791
2792                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2793
2794                 c_irq_params->adev = adev;
2795                 c_irq_params->irq_src = int_params.irq_source;
2796
2797                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2798                                 dm_vupdate_high_irq, c_irq_params);
2799         }
2800
2801         /* Use GRPH_PFLIP interrupt */
2802         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2803                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2804                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2805                 if (r) {
2806                         DRM_ERROR("Failed to add page flip irq id!\n");
2807                         return r;
2808                 }
2809
2810                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2811                 int_params.irq_source =
2812                         dc_interrupt_to_irq_source(dc, i, 0);
2813
2814                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2815
2816                 c_irq_params->adev = adev;
2817                 c_irq_params->irq_src = int_params.irq_source;
2818
2819                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2820                                 dm_pflip_high_irq, c_irq_params);
2821
2822         }
2823
2824         /* HPD */
2825         r = amdgpu_irq_add_id(adev, client_id,
2826                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2827         if (r) {
2828                 DRM_ERROR("Failed to add hpd irq id!\n");
2829                 return r;
2830         }
2831
2832         register_hpd_handlers(adev);
2833
2834         return 0;
2835 }
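
/*
 * The three loops above share one shape: add an IH source id with the base
 * driver, translate it to a DC irq source, and attach a high-irq handler
 * with a per-source params slot. A minimal sketch of that shape as a
 * factored helper (illustrative only; register_irq_range() and its argument
 * list are hypothetical and not part of this driver):
 *
 *	static int register_irq_range(struct amdgpu_device *adev,
 *				      unsigned int client_id,
 *				      int first, int last, int stride,
 *				      struct amdgpu_irq_src *irq_src,
 *				      struct common_irq_params *params_base,
 *				      enum dc_irq_source dc_src_base,
 *				      void (*handler)(void *))
 *	{
 *		struct dc_interrupt_params int_params = {0};
 *		struct common_irq_params *p;
 *		int i, r;
 *
 *		int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
 *		int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
 *		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 *
 *		for (i = first; i <= last; i += stride) {
 *			r = amdgpu_irq_add_id(adev, client_id, i, irq_src);
 *			if (r)
 *				return r;
 *
 *			int_params.irq_source =
 *				dc_interrupt_to_irq_source(adev->dm.dc, i, 0);
 *			p = &params_base[int_params.irq_source - dc_src_base];
 *			p->adev = adev;
 *			p->irq_src = int_params.irq_source;
 *			amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *							 handler, p);
 *		}
 *		return 0;
 *	}
 */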
2836
2837 #if defined(CONFIG_DRM_AMD_DC_DCN)
2838 /* Register IRQ sources and initialize IRQ callbacks */
2839 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2840 {
2841         struct dc *dc = adev->dm.dc;
2842         struct common_irq_params *c_irq_params;
2843         struct dc_interrupt_params int_params = {0};
2844         int r;
2845         int i;
2846
2847         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2848         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2849
2850         /*
2851          * Actions of amdgpu_irq_add_id():
2852          * 1. Register a set() function with base driver.
2853          *    Base driver will call set() function to enable/disable an
2854          *    interrupt in DC hardware.
2855          * 2. Register amdgpu_dm_irq_handler().
2856          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2857          *    coming from DC hardware.
2858          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2859          *    for acknowledging and handling.
2860          */
2861
2862         /* Use VSTARTUP interrupt */
2863         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2864                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2865                         i++) {
2866                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2867
2868                 if (r) {
2869                         DRM_ERROR("Failed to add crtc irq id!\n");
2870                         return r;
2871                 }
2872
2873                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2874                 int_params.irq_source =
2875                         dc_interrupt_to_irq_source(dc, i, 0);
2876
2877                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2878
2879                 c_irq_params->adev = adev;
2880                 c_irq_params->irq_src = int_params.irq_source;
2881
2882                 amdgpu_dm_irq_register_interrupt(
2883                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
2884         }
2885
2886         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2887          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2888          * to trigger at end of each vblank, regardless of state of the lock,
2889          * matching DCE behaviour.
2890          */
2891         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2892              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2893              i++) {
2894                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2895
2896                 if (r) {
2897                         DRM_ERROR("Failed to add vupdate irq id!\n");
2898                         return r;
2899                 }
2900
2901                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2902                 int_params.irq_source =
2903                         dc_interrupt_to_irq_source(dc, i, 0);
2904
2905                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2906
2907                 c_irq_params->adev = adev;
2908                 c_irq_params->irq_src = int_params.irq_source;
2909
2910                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2911                                 dm_vupdate_high_irq, c_irq_params);
2912         }
2913
2914         /* Use GRPH_PFLIP interrupt */
2915         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2916                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2917                         i++) {
2918                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2919                 if (r) {
2920                         DRM_ERROR("Failed to add page flip irq id!\n");
2921                         return r;
2922                 }
2923
2924                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2925                 int_params.irq_source =
2926                         dc_interrupt_to_irq_source(dc, i, 0);
2927
2928                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2929
2930                 c_irq_params->adev = adev;
2931                 c_irq_params->irq_src = int_params.irq_source;
2932
2933                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2934                                 dm_pflip_high_irq, c_irq_params);
2935
2936         }
2937
2938         /* HPD */
2939         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2940                         &adev->hpd_irq);
2941         if (r) {
2942                 DRM_ERROR("Failed to add hpd irq id!\n");
2943                 return r;
2944         }
2945
2946         register_hpd_handlers(adev);
2947
2948         return 0;
2949 }
2950 #endif
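
/*
 * On DCN the per-CRTC source ids are contiguous, so with e.g.
 * adev->mode_info.num_crtc == 4 the VSTARTUP loop above registers
 * DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + 0..3, which
 * dc_interrupt_to_irq_source() maps to DC_IRQ_SOURCE_VBLANK1..VBLANK4,
 * i.e. slots 0..3 of adev->dm.vblank_params (illustrative walk-through,
 * assuming the 1:1 id-to-source mapping used above).
 */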
2951
2952 /*
2953  * Acquires the lock for the atomic state object and returns
2954  * the new atomic state.
2955  *
2956  * This should only be called during atomic check.
2957  */
2958 static int dm_atomic_get_state(struct drm_atomic_state *state,
2959                                struct dm_atomic_state **dm_state)
2960 {
2961         struct drm_device *dev = state->dev;
2962         struct amdgpu_device *adev = drm_to_adev(dev);
2963         struct amdgpu_display_manager *dm = &adev->dm;
2964         struct drm_private_state *priv_state;
2965
2966         if (*dm_state)
2967                 return 0;
2968
2969         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2970         if (IS_ERR(priv_state))
2971                 return PTR_ERR(priv_state);
2972
2973         *dm_state = to_dm_atomic_state(priv_state);
2974
2975         return 0;
2976 }
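
/*
 * Typical use from atomic check (illustrative; 'state' is the
 * drm_atomic_state being validated):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret;
 *
 *	ret = dm_atomic_get_state(state, &dm_state);
 *	if (ret)
 *		return ret;
 *	// dm_state->context can now be modified; the private object lock
 *	// stays held for the remainder of the atomic check.
 */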
2977
2978 static struct dm_atomic_state *
2979 dm_atomic_get_new_state(struct drm_atomic_state *state)
2980 {
2981         struct drm_device *dev = state->dev;
2982         struct amdgpu_device *adev = drm_to_adev(dev);
2983         struct amdgpu_display_manager *dm = &adev->dm;
2984         struct drm_private_obj *obj;
2985         struct drm_private_state *new_obj_state;
2986         int i;
2987
2988         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2989                 if (obj->funcs == dm->atomic_obj.funcs)
2990                         return to_dm_atomic_state(new_obj_state);
2991         }
2992
2993         return NULL;
2994 }
2995
2996 static struct drm_private_state *
2997 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2998 {
2999         struct dm_atomic_state *old_state, *new_state;
3000
3001         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3002         if (!new_state)
3003                 return NULL;
3004
3005         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3006
3007         old_state = to_dm_atomic_state(obj->state);
3008
3009         if (old_state && old_state->context)
3010                 new_state->context = dc_copy_state(old_state->context);
3011
3012         if (!new_state->context) {
3013                 kfree(new_state);
3014                 return NULL;
3015         }
3016
3017         return &new_state->base;
3018 }
3019
3020 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3021                                     struct drm_private_state *state)
3022 {
3023         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3024
3025         if (dm_state && dm_state->context)
3026                 dc_release_state(dm_state->context);
3027
3028         kfree(dm_state);
3029 }
3030
3031 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3032         .atomic_duplicate_state = dm_atomic_duplicate_state,
3033         .atomic_destroy_state = dm_atomic_destroy_state,
3034 };
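
/*
 * The two callbacks form a pair: every DC context taken via dc_copy_state()
 * in dm_atomic_duplicate_state() is dropped with dc_release_state() in
 * dm_atomic_destroy_state() when DRM frees the private state. Sketch of the
 * lifetime as the atomic helpers drive it (illustrative):
 *
 *	new = funcs->atomic_duplicate_state(obj);     // copies the dc context
 *	... atomic check / commit ...
 *	funcs->atomic_destroy_state(obj, new);        // releases it again
 */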
3035
3036 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3037 {
3038         struct dm_atomic_state *state;
3039         int r;
3040
3041         adev->mode_info.mode_config_initialized = true;
3042
3043         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3044         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3045
3046         adev_to_drm(adev)->mode_config.max_width = 16384;
3047         adev_to_drm(adev)->mode_config.max_height = 16384;
3048
3049         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3050         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3051         /* indicates support for immediate flip */
3052         adev_to_drm(adev)->mode_config.async_page_flip = true;
3053
3054         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3055
3056         state = kzalloc(sizeof(*state), GFP_KERNEL);
3057         if (!state)
3058                 return -ENOMEM;
3059
3060         state->context = dc_create_state(adev->dm.dc);
3061         if (!state->context) {
3062                 kfree(state);
3063                 return -ENOMEM;
3064         }
3065
3066         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3067
3068         drm_atomic_private_obj_init(adev_to_drm(adev),
3069                                     &adev->dm.atomic_obj,
3070                                     &state->base,
3071                                     &dm_atomic_state_funcs);
3072
3073         r = amdgpu_display_modeset_create_props(adev);
3074         if (r) {
3075                 dc_release_state(state->context);
3076                 kfree(state);
3077                 return r;
3078         }
3079
3080         r = amdgpu_dm_audio_init(adev);
3081         if (r) {
3082                 dc_release_state(state->context);
3083                 kfree(state);
3084                 return r;
3085         }
3086
3087         return 0;
3088 }
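
/*
 * Note that the private object is seeded with a copy of the current
 * (boot-time) DC resource state via
 * dc_resource_state_copy_construct_current(), so the very first atomic
 * check duplicates real hardware state rather than an empty context.
 */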
3089
3090 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3091 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3092 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3093
3094 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3095         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3096
3097 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3098 {
3099 #if defined(CONFIG_ACPI)
3100         struct amdgpu_dm_backlight_caps caps;
3101
3102         memset(&caps, 0, sizeof(caps));
3103
3104         if (dm->backlight_caps.caps_valid)
3105                 return;
3106
3107         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3108         if (caps.caps_valid) {
3109                 dm->backlight_caps.caps_valid = true;
3110                 if (caps.aux_support)
3111                         return;
3112                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3113                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3114         } else {
3115                 dm->backlight_caps.min_input_signal =
3116                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3117                 dm->backlight_caps.max_input_signal =
3118                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3119         }
3120 #else
3121         if (dm->backlight_caps.aux_support)
3122                 return;
3123
3124         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3125         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3126 #endif
3127 }
3128
3129 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3130 {
3131         bool rc;
3132
3133         if (!link)
3134                 return 1;
3135
3136         rc = dc_link_set_backlight_level_nits(link, true, brightness,
3137                                               AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3138
3139         return rc ? 0 : 1;
3140 }
3141
3142 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3143                                 unsigned *min, unsigned *max)
3144 {
3145         if (!caps)
3146                 return 0;
3147
3148         if (caps->aux_support) {
3149                 // Firmware limits are in nits, DC API wants millinits.
3150                 *max = 1000 * caps->aux_max_input_signal;
3151                 *min = 1000 * caps->aux_min_input_signal;
3152         } else {
3153                 // Firmware limits are 8-bit, PWM control is 16-bit.
3154                 *max = 0x101 * caps->max_input_signal;
3155                 *min = 0x101 * caps->min_input_signal;
3156         }
3157         return 1;
3158 }
3159
3160 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3161                                         uint32_t brightness)
3162 {
3163         unsigned min, max;
3164
3165         if (!get_brightness_range(caps, &min, &max))
3166                 return brightness;
3167
3168         // Rescale 0..255 to min..max
3169         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3170                                        AMDGPU_MAX_BL_LEVEL);
3171 }
3172
3173 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3174                                       uint32_t brightness)
3175 {
3176         unsigned min, max;
3177
3178         if (!get_brightness_range(caps, &min, &max))
3179                 return brightness;
3180
3181         if (brightness < min)
3182                 return 0;
3183         // Rescale min..max to 0..255
3184         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3185                                  max - min);
3186 }
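
/*
 * Worked example (illustrative, PWM path with the defaults above):
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so for a user
 * brightness of 128:
 *
 *	convert_brightness_from_user(caps, 128)
 *		= 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255)
 *		= 34432
 *	convert_brightness_to_user(caps, 34432)
 *		= DIV_ROUND_CLOSEST(255 * (34432 - 3084), 65535 - 3084)
 *		= 128
 *
 * i.e. the two conversions round-trip within rounding error.
 */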
3187
3188 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3189 {
3190         struct amdgpu_display_manager *dm = bl_get_data(bd);
3191         struct amdgpu_dm_backlight_caps caps;
3192         struct dc_link *link = NULL;
3193         u32 brightness;
3194         bool rc;
3195
3196         amdgpu_dm_update_backlight_caps(dm);
3197         caps = dm->backlight_caps;
3198
3199         link = (struct dc_link *)dm->backlight_link;
3200
3201         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3202         // Change brightness based on AUX property
3203         if (caps.aux_support)
3204                 return set_backlight_via_aux(link, brightness);
3205
3206         rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3207
3208         return rc ? 0 : 1;
3209 }
3210
3211 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3212 {
3213         struct amdgpu_display_manager *dm = bl_get_data(bd);
3214         int ret = dc_link_get_backlight_level(dm->backlight_link);
3215
3216         if (ret == DC_ERROR_UNEXPECTED)
3217                 return bd->props.brightness;
3218         return convert_brightness_to_user(&dm->backlight_caps, ret);
3219 }
3220
3221 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3222         .options = BL_CORE_SUSPENDRESUME,
3223         .get_brightness = amdgpu_dm_backlight_get_brightness,
3224         .update_status  = amdgpu_dm_backlight_update_status,
3225 };
3226
3227 static void
3228 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3229 {
3230         char bl_name[16];
3231         struct backlight_properties props = { 0 };
3232
3233         amdgpu_dm_update_backlight_caps(dm);
3234
3235         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3236         props.brightness = AMDGPU_MAX_BL_LEVEL;
3237         props.type = BACKLIGHT_RAW;
3238
3239         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3240                  adev_to_drm(dm->adev)->primary->index);
3241
3242         dm->backlight_dev = backlight_device_register(bl_name,
3243                                                       adev_to_drm(dm->adev)->dev,
3244                                                       dm,
3245                                                       &amdgpu_dm_backlight_ops,
3246                                                       &props);
3247
3248         if (IS_ERR(dm->backlight_dev))
3249                 DRM_ERROR("DM: Backlight registration failed!\n");
3250         else
3251                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3252 }
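
/*
 * Once registered, the device is exposed through the standard backlight
 * class interface, e.g. (illustrative; the index depends on the DRM
 * primary node):
 *
 *	# cat /sys/class/backlight/amdgpu_bl0/max_brightness
 *	255
 *	# echo 128 > /sys/class/backlight/amdgpu_bl0/brightness
 *
 * The write lands in amdgpu_dm_backlight_update_status() via
 * amdgpu_dm_backlight_ops.update_status.
 */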
3253
3254 #endif
3255
3256 static int initialize_plane(struct amdgpu_display_manager *dm,
3257                             struct amdgpu_mode_info *mode_info, int plane_id,
3258                             enum drm_plane_type plane_type,
3259                             const struct dc_plane_cap *plane_cap)
3260 {
3261         struct drm_plane *plane;
3262         unsigned long possible_crtcs;
3263         int ret = 0;
3264
3265         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3266         if (!plane) {
3267                 DRM_ERROR("KMS: Failed to allocate plane\n");
3268                 return -ENOMEM;
3269         }
3270         plane->type = plane_type;
3271
3272         /*
3273          * HACK: IGT tests expect that the primary plane for a CRTC
3274          * can only have one possible CRTC. Only expose support for
3275          * all CRTCs on planes that will never be used as a primary
3276          * plane - i.e. overlay or underlay planes.
3277          */
3278         possible_crtcs = 1 << plane_id;
3279         if (plane_id >= dm->dc->caps.max_streams)
3280                 possible_crtcs = 0xff;
3281
3282         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3283
3284         if (ret) {
3285                 DRM_ERROR("KMS: Failed to initialize plane\n");
3286                 kfree(plane);
3287                 return ret;
3288         }
3289
3290         if (mode_info)
3291                 mode_info->planes[plane_id] = plane;
3292
3293         return ret;
3294 }
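
/*
 * Worked example for the possible_crtcs mask (illustrative): on an ASIC
 * with dc->caps.max_streams == 4, primary plane 2 gets possible_crtcs =
 * 1 << 2 = 0x4 (CRTC 2 only), while an overlay plane created with
 * plane_id >= 4 gets 0xff, i.e. it may be assigned to any CRTC.
 */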
3295
3296
3297 static void register_backlight_device(struct amdgpu_display_manager *dm,
3298                                       struct dc_link *link)
3299 {
3300 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3301         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3302
3303         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3304             link->type != dc_connection_none) {
3305                 /*
3306                  * Even if registration fails, we should continue with
3307                  * DM initialization, because not having backlight control
3308                  * is better than a black screen.
3309                  */
3310                 amdgpu_dm_register_backlight_device(dm);
3311
3312                 if (dm->backlight_dev)
3313                         dm->backlight_link = link;
3314         }
3315 #endif
3316 }
3317
3318
3319 /*
3320  * In this architecture, the association
3321  * connector -> encoder -> crtc
3322  * is not really required. The crtc and connector will hold the
3323  * display_index as an abstraction to use with the DAL component.
3324  *
3325  * Returns 0 on success
3326  */
3327 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3328 {
3329         struct amdgpu_display_manager *dm = &adev->dm;
3330         int32_t i;
3331         struct amdgpu_dm_connector *aconnector = NULL;
3332         struct amdgpu_encoder *aencoder = NULL;
3333         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3334         uint32_t link_cnt;
3335         int32_t primary_planes;
3336         enum dc_connection_type new_connection_type = dc_connection_none;
3337         const struct dc_plane_cap *plane;
3338
3339         dm->display_indexes_num = dm->dc->caps.max_streams;
3340         /* Update the actual number of CRTCs in use */
3341         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3342
3343         link_cnt = dm->dc->caps.max_links;
3344         if (amdgpu_dm_mode_config_init(dm->adev)) {
3345                 DRM_ERROR("DM: Failed to initialize mode config\n");
3346                 return -EINVAL;
3347         }
3348
3349         /* There is one primary plane per CRTC */
3350         primary_planes = dm->dc->caps.max_streams;
3351         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3352
3353         /*
3354          * Initialize primary planes, implicit planes for legacy IOCTLs.
3355          * Order is reversed to match iteration order in atomic check.
3356          */
3357         for (i = (primary_planes - 1); i >= 0; i--) {
3358                 plane = &dm->dc->caps.planes[i];
3359
3360                 if (initialize_plane(dm, mode_info, i,
3361                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3362                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3363                         goto fail;
3364                 }
3365         }
3366
3367         /*
3368          * Initialize overlay planes, index starting after primary planes.
3369          * These planes have a higher DRM index than the primary planes since
3370          * they should be considered as having a higher z-order.
3371          * Order is reversed to match iteration order in atomic check.
3372          *
3373          * Only support DCN for now, and only expose one so we don't encourage
3374          * userspace to use up all the pipes.
3375          */
3376         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3377                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3378
3379                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3380                         continue;
3381
3382                 if (!plane->blends_with_above || !plane->blends_with_below)
3383                         continue;
3384
3385                 if (!plane->pixel_format_support.argb8888)
3386                         continue;
3387
3388                 if (initialize_plane(dm, NULL, primary_planes + i,
3389                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3390                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3391                         goto fail;
3392                 }
3393
3394                 /* Only create one overlay plane. */
3395                 break;
3396         }
3397
3398         for (i = 0; i < dm->dc->caps.max_streams; i++)
3399                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3400                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3401                         goto fail;
3402                 }
3403
3404         /* loops over all connectors on the board */
3405         for (i = 0; i < link_cnt; i++) {
3406                 struct dc_link *link = NULL;
3407
3408                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3409                         DRM_ERROR(
3410                                 "KMS: Cannot support more than %d display indexes\n",
3411                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3412                         continue;
3413                 }
3414
3415                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3416                 if (!aconnector)
3417                         goto fail;
3418
3419                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3420                 if (!aencoder)
3421                         goto fail;
3422
3423                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3424                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3425                         goto fail;
3426                 }
3427
3428                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3429                         DRM_ERROR("KMS: Failed to initialize connector\n");
3430                         goto fail;
3431                 }
3432
3433                 link = dc_get_link_at_index(dm->dc, i);
3434
3435                 if (!dc_link_detect_sink(link, &new_connection_type))
3436                         DRM_ERROR("KMS: Failed to detect connector\n");
3437
3438                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3439                         emulated_link_detect(link);
3440                         amdgpu_dm_update_connector_after_detect(aconnector);
3441
3442                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3443                         amdgpu_dm_update_connector_after_detect(aconnector);
3444                         register_backlight_device(dm, link);
3445                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3446                                 amdgpu_dm_set_psr_caps(link);
3447                 }
3448
3449
3450         }
3451
3452         /* Software is initialized. Now we can register interrupt handlers. */
3453         switch (adev->asic_type) {
3454 #if defined(CONFIG_DRM_AMD_DC_SI)
3455         case CHIP_TAHITI:
3456         case CHIP_PITCAIRN:
3457         case CHIP_VERDE:
3458         case CHIP_OLAND:
3459                 if (dce60_register_irq_handlers(dm->adev)) {
3460                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3461                         goto fail;
3462                 }
3463                 break;
3464 #endif
3465         case CHIP_BONAIRE:
3466         case CHIP_HAWAII:
3467         case CHIP_KAVERI:
3468         case CHIP_KABINI:
3469         case CHIP_MULLINS:
3470         case CHIP_TONGA:
3471         case CHIP_FIJI:
3472         case CHIP_CARRIZO:
3473         case CHIP_STONEY:
3474         case CHIP_POLARIS11:
3475         case CHIP_POLARIS10:
3476         case CHIP_POLARIS12:
3477         case CHIP_VEGAM:
3478         case CHIP_VEGA10:
3479         case CHIP_VEGA12:
3480         case CHIP_VEGA20:
3481                 if (dce110_register_irq_handlers(dm->adev)) {
3482                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3483                         goto fail;
3484                 }
3485                 break;
3486 #if defined(CONFIG_DRM_AMD_DC_DCN)
3487         case CHIP_RAVEN:
3488         case CHIP_NAVI12:
3489         case CHIP_NAVI10:
3490         case CHIP_NAVI14:
3491         case CHIP_RENOIR:
3492         case CHIP_SIENNA_CICHLID:
3493         case CHIP_NAVY_FLOUNDER:
3494         case CHIP_DIMGREY_CAVEFISH:
3495         case CHIP_VANGOGH:
3496                 if (dcn10_register_irq_handlers(dm->adev)) {
3497                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3498                         goto fail;
3499                 }
3500                 break;
3501 #endif
3502         default:
3503                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3504                 goto fail;
3505         }
3506
3507         return 0;
3508 fail:
3509         kfree(aencoder);
3510         kfree(aconnector);
3511
3512         return -EINVAL;
3513 }
3514
3515 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3516 {
3517         drm_mode_config_cleanup(dm->ddev);
3518         drm_atomic_private_obj_fini(&dm->atomic_obj);
3520 }
3521
3522 /******************************************************************************
3523  * amdgpu_display_funcs functions
3524  *****************************************************************************/
3525
3526 /*
3527  * dm_bandwidth_update - program display watermarks
3528  *
3529  * @adev: amdgpu_device pointer
3530  *
3531  * Calculate and program the display watermarks and line buffer allocation.
3532  */
3533 static void dm_bandwidth_update(struct amdgpu_device *adev)
3534 {
3535         /* TODO: implement later */
3536 }
3537
3538 static const struct amdgpu_display_funcs dm_display_funcs = {
3539         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3540         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3541         .backlight_set_level = NULL, /* never called for DC */
3542         .backlight_get_level = NULL, /* never called for DC */
3543         .hpd_sense = NULL,/* called unconditionally */
3544         .hpd_set_polarity = NULL, /* called unconditionally */
3545         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3546         .page_flip_get_scanoutpos =
3547                 dm_crtc_get_scanoutpos,/* called unconditionally */
3548         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3549         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3550 };
3551
3552 #if defined(CONFIG_DEBUG_KERNEL_DC)
3553
3554 static ssize_t s3_debug_store(struct device *device,
3555                               struct device_attribute *attr,
3556                               const char *buf,
3557                               size_t count)
3558 {
3559         int ret;
3560         int s3_state;
3561         struct drm_device *drm_dev = dev_get_drvdata(device);
3562         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3563
3564         ret = kstrtoint(buf, 0, &s3_state);
3565
3566         if (ret == 0) {
3567                 if (s3_state) {
3568                         dm_resume(adev);
3569                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3570                 } else
3571                         dm_suspend(adev);
3572         }
3573
3574         return ret == 0 ? count : 0;
3575 }
3576
3577 DEVICE_ATTR_WO(s3_debug);
3578
3579 #endif
3580
3581 static int dm_early_init(void *handle)
3582 {
3583         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3584
3585         switch (adev->asic_type) {
3586 #if defined(CONFIG_DRM_AMD_DC_SI)
3587         case CHIP_TAHITI:
3588         case CHIP_PITCAIRN:
3589         case CHIP_VERDE:
3590                 adev->mode_info.num_crtc = 6;
3591                 adev->mode_info.num_hpd = 6;
3592                 adev->mode_info.num_dig = 6;
3593                 break;
3594         case CHIP_OLAND:
3595                 adev->mode_info.num_crtc = 2;
3596                 adev->mode_info.num_hpd = 2;
3597                 adev->mode_info.num_dig = 2;
3598                 break;
3599 #endif
3600         case CHIP_BONAIRE:
3601         case CHIP_HAWAII:
3602                 adev->mode_info.num_crtc = 6;
3603                 adev->mode_info.num_hpd = 6;
3604                 adev->mode_info.num_dig = 6;
3605                 break;
3606         case CHIP_KAVERI:
3607                 adev->mode_info.num_crtc = 4;
3608                 adev->mode_info.num_hpd = 6;
3609                 adev->mode_info.num_dig = 7;
3610                 break;
3611         case CHIP_KABINI:
3612         case CHIP_MULLINS:
3613                 adev->mode_info.num_crtc = 2;
3614                 adev->mode_info.num_hpd = 6;
3615                 adev->mode_info.num_dig = 6;
3616                 break;
3617         case CHIP_FIJI:
3618         case CHIP_TONGA:
3619                 adev->mode_info.num_crtc = 6;
3620                 adev->mode_info.num_hpd = 6;
3621                 adev->mode_info.num_dig = 7;
3622                 break;
3623         case CHIP_CARRIZO:
3624                 adev->mode_info.num_crtc = 3;
3625                 adev->mode_info.num_hpd = 6;
3626                 adev->mode_info.num_dig = 9;
3627                 break;
3628         case CHIP_STONEY:
3629                 adev->mode_info.num_crtc = 2;
3630                 adev->mode_info.num_hpd = 6;
3631                 adev->mode_info.num_dig = 9;
3632                 break;
3633         case CHIP_POLARIS11:
3634         case CHIP_POLARIS12:
3635                 adev->mode_info.num_crtc = 5;
3636                 adev->mode_info.num_hpd = 5;
3637                 adev->mode_info.num_dig = 5;
3638                 break;
3639         case CHIP_POLARIS10:
3640         case CHIP_VEGAM:
3641                 adev->mode_info.num_crtc = 6;
3642                 adev->mode_info.num_hpd = 6;
3643                 adev->mode_info.num_dig = 6;
3644                 break;
3645         case CHIP_VEGA10:
3646         case CHIP_VEGA12:
3647         case CHIP_VEGA20:
3648                 adev->mode_info.num_crtc = 6;
3649                 adev->mode_info.num_hpd = 6;
3650                 adev->mode_info.num_dig = 6;
3651                 break;
3652 #if defined(CONFIG_DRM_AMD_DC_DCN)
3653         case CHIP_RAVEN:
3654         case CHIP_RENOIR:
3655         case CHIP_VANGOGH:
3656                 adev->mode_info.num_crtc = 4;
3657                 adev->mode_info.num_hpd = 4;
3658                 adev->mode_info.num_dig = 4;
3659                 break;
3660         case CHIP_NAVI10:
3661         case CHIP_NAVI12:
3662         case CHIP_SIENNA_CICHLID:
3663         case CHIP_NAVY_FLOUNDER:
3664                 adev->mode_info.num_crtc = 6;
3665                 adev->mode_info.num_hpd = 6;
3666                 adev->mode_info.num_dig = 6;
3667                 break;
3668         case CHIP_NAVI14:
3669         case CHIP_DIMGREY_CAVEFISH:
3670                 adev->mode_info.num_crtc = 5;
3671                 adev->mode_info.num_hpd = 5;
3672                 adev->mode_info.num_dig = 5;
3673                 break;
3674 #endif
3675         default:
3676                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3677                 return -EINVAL;
3678         }
3679
3680         amdgpu_dm_set_irq_funcs(adev);
3681
3682         if (adev->mode_info.funcs == NULL)
3683                 adev->mode_info.funcs = &dm_display_funcs;
3684
3685         /*
3686          * Note: Do NOT change adev->audio_endpt_rreg and
3687          * adev->audio_endpt_wreg because they are initialised in
3688          * amdgpu_device_init()
3689          */
3690 #if defined(CONFIG_DEBUG_KERNEL_DC)
3691         device_create_file(
3692                 adev_to_drm(adev)->dev,
3693                 &dev_attr_s3_debug);
3694 #endif
3695
3696         return 0;
3697 }
3698
3699 static bool modeset_required(struct drm_crtc_state *crtc_state,
3700                              struct dc_stream_state *new_stream,
3701                              struct dc_stream_state *old_stream)
3702 {
3703         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3704 }
3705
3706 static bool modereset_required(struct drm_crtc_state *crtc_state)
3707 {
3708         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3709 }
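
/*
 * Together the two predicates split the CRTCs that need a full modeset
 * (illustrative truth table):
 *
 *	crtc_state->active  needs_modeset  modeset_required  modereset_required
 *	true                true           true              false
 *	false               true           false             true
 *	any                 false          false             false
 */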
3710
3711 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3712 {
3713         drm_encoder_cleanup(encoder);
3714         kfree(encoder);
3715 }
3716
3717 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3718         .destroy = amdgpu_dm_encoder_destroy,
3719 };
3720
3721
3722 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3723                                          struct drm_framebuffer *fb,
3724                                          int *min_downscale, int *max_upscale)
3725 {
3726         struct amdgpu_device *adev = drm_to_adev(dev);
3727         struct dc *dc = adev->dm.dc;
3728         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3729         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3730
3731         switch (fb->format->format) {
3732         case DRM_FORMAT_P010:
3733         case DRM_FORMAT_NV12:
3734         case DRM_FORMAT_NV21:
3735                 *max_upscale = plane_cap->max_upscale_factor.nv12;
3736                 *min_downscale = plane_cap->max_downscale_factor.nv12;
3737                 break;
3738
3739         case DRM_FORMAT_XRGB16161616F:
3740         case DRM_FORMAT_ARGB16161616F:
3741         case DRM_FORMAT_XBGR16161616F:
3742         case DRM_FORMAT_ABGR16161616F:
3743                 *max_upscale = plane_cap->max_upscale_factor.fp16;
3744                 *min_downscale = plane_cap->max_downscale_factor.fp16;
3745                 break;
3746
3747         default:
3748                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3749                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3750                 break;
3751         }
3752
3753         /*
3754          * A factor of 1 in the plane_cap means scaling is not allowed, i.e.
3755          * use a scaling factor of 1.0 == 1000 units.
3756          */
3757         if (*max_upscale == 1)
3758                 *max_upscale = 1000;
3759
3760         if (*min_downscale == 1)
3761                 *min_downscale = 1000;
3762 }
3763
3764
3765 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3766                                 struct dc_scaling_info *scaling_info)
3767 {
3768         int scale_w, scale_h, min_downscale, max_upscale;
3769
3770         memset(scaling_info, 0, sizeof(*scaling_info));
3771
3772         /* Source is fixed 16.16 but we ignore the fractional part for now... */
3773         scaling_info->src_rect.x = state->src_x >> 16;
3774         scaling_info->src_rect.y = state->src_y >> 16;
3775
3776         scaling_info->src_rect.width = state->src_w >> 16;
3777         if (scaling_info->src_rect.width == 0)
3778                 return -EINVAL;
3779
3780         scaling_info->src_rect.height = state->src_h >> 16;
3781         if (scaling_info->src_rect.height == 0)
3782                 return -EINVAL;
3783
3784         scaling_info->dst_rect.x = state->crtc_x;
3785         scaling_info->dst_rect.y = state->crtc_y;
3786
3787         if (state->crtc_w == 0)
3788                 return -EINVAL;
3789
3790         scaling_info->dst_rect.width = state->crtc_w;
3791
3792         if (state->crtc_h == 0)
3793                 return -EINVAL;
3794
3795         scaling_info->dst_rect.height = state->crtc_h;
3796
3797         /* DRM doesn't specify clipping on destination output. */
3798         scaling_info->clip_rect = scaling_info->dst_rect;
3799
3800         /* Validate scaling per-format with DC plane caps */
3801         if (state->plane && state->plane->dev && state->fb) {
3802                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3803                                              &min_downscale, &max_upscale);
3804         } else {
3805                 min_downscale = 250;
3806                 max_upscale = 16000;
3807         }
3808
3809         scale_w = scaling_info->dst_rect.width * 1000 /
3810                   scaling_info->src_rect.width;
3811
3812         if (scale_w < min_downscale || scale_w > max_upscale)
3813                 return -EINVAL;
3814
3815         scale_h = scaling_info->dst_rect.height * 1000 /
3816                   scaling_info->src_rect.height;
3817
3818         if (scale_h < min_downscale || scale_h > max_upscale)
3819                 return -EINVAL;
3820
3821         /*
3822          * The "scaling_quality" can be ignored for now; a quality of 0 has DC
3823          * assume reasonable defaults based on the format.
3824          */
3825
3826         return 0;
3827 }
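
/*
 * Worked example (illustrative): a 1920-wide source displayed 960 pixels
 * wide gives scale_w = 960 * 1000 / 1920 = 500, i.e. a 2x downscale. With
 * the fallback limits above (min_downscale = 250, max_upscale = 16000),
 * anything between a 4x downscale and a 16x upscale is accepted.
 */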
3828
3829 static void
3830 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3831                                  uint64_t tiling_flags)
3832 {
3833         /* Fill GFX8 params */
3834         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3835                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3836
3837                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3838                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3839                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3840                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3841                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3842
3843                 /* XXX fix me for VI */
3844                 tiling_info->gfx8.num_banks = num_banks;
3845                 tiling_info->gfx8.array_mode =
3846                                 DC_ARRAY_2D_TILED_THIN1;
3847                 tiling_info->gfx8.tile_split = tile_split;
3848                 tiling_info->gfx8.bank_width = bankw;
3849                 tiling_info->gfx8.bank_height = bankh;
3850                 tiling_info->gfx8.tile_aspect = mtaspect;
3851                 tiling_info->gfx8.tile_mode =
3852                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3853         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3854                         == DC_ARRAY_1D_TILED_THIN1) {
3855                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3856         }
3857
3858         tiling_info->gfx8.pipe_config =
3859                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3860 }
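
/*
 * The tiling_flags bitfield is packed with the matching AMDGPU_TILING_SET()
 * macros from the amdgpu uapi, e.g. (illustrative):
 *
 *	tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, DC_ARRAY_2D_TILED_THIN1) |
 *		       AMDGPU_TILING_SET(BANK_WIDTH, bankw) |
 *		       AMDGPU_TILING_SET(BANK_HEIGHT, bankh);
 *
 * so every AMDGPU_TILING_GET() above simply extracts one of those fields.
 */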
3861
3862 static void
3863 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3864                                   union dc_tiling_info *tiling_info)
3865 {
3866         tiling_info->gfx9.num_pipes =
3867                 adev->gfx.config.gb_addr_config_fields.num_pipes;
3868         tiling_info->gfx9.num_banks =
3869                 adev->gfx.config.gb_addr_config_fields.num_banks;
3870         tiling_info->gfx9.pipe_interleave =
3871                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3872         tiling_info->gfx9.num_shader_engines =
3873                 adev->gfx.config.gb_addr_config_fields.num_se;
3874         tiling_info->gfx9.max_compressed_frags =
3875                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3876         tiling_info->gfx9.num_rb_per_se =
3877                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3878         tiling_info->gfx9.shaderEnable = 1;
3879         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3880             adev->asic_type == CHIP_NAVY_FLOUNDER ||
3881             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3882             adev->asic_type == CHIP_VANGOGH)
3883                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3884 }
3885
3886 static int
3887 validate_dcc(struct amdgpu_device *adev,
3888              const enum surface_pixel_format format,
3889              const enum dc_rotation_angle rotation,
3890              const union dc_tiling_info *tiling_info,
3891              const struct dc_plane_dcc_param *dcc,
3892              const struct dc_plane_address *address,
3893              const struct plane_size *plane_size)
3894 {
3895         struct dc *dc = adev->dm.dc;
3896         struct dc_dcc_surface_param input;
3897         struct dc_surface_dcc_cap output;
3898
3899         memset(&input, 0, sizeof(input));
3900         memset(&output, 0, sizeof(output));
3901
3902         if (!dcc->enable)
3903                 return 0;
3904
3905         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3906             !dc->cap_funcs.get_dcc_compression_cap)
3907                 return -EINVAL;
3908
3909         input.format = format;
3910         input.surface_size.width = plane_size->surface_size.width;
3911         input.surface_size.height = plane_size->surface_size.height;
3912         input.swizzle_mode = tiling_info->gfx9.swizzle;
3913
3914         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3915                 input.scan = SCAN_DIRECTION_HORIZONTAL;
3916         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3917                 input.scan = SCAN_DIRECTION_VERTICAL;
3918
3919         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3920                 return -EINVAL;
3921
3922         if (!output.capable)
3923                 return -EINVAL;
3924
3925         if (dcc->independent_64b_blks == 0 &&
3926             output.grph.rgb.independent_64b_blks != 0)
3927                 return -EINVAL;
3928
3929         return 0;
3930 }
3931
3932 static bool
3933 modifier_has_dcc(uint64_t modifier)
3934 {
3935         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3936 }
3937
3938 static unsigned
3939 modifier_gfx9_swizzle_mode(uint64_t modifier)
3940 {
3941         if (modifier == DRM_FORMAT_MOD_LINEAR)
3942                 return 0;
3943
3944         return AMD_FMT_MOD_GET(TILE, modifier);
3945 }
3946
3947 static const struct drm_format_info *
3948 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3949 {
3950         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
3951 }
3952
3953 static void
3954 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3955                                     union dc_tiling_info *tiling_info,
3956                                     uint64_t modifier)
3957 {
3958         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3959         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3960         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3961         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3962
3963         fill_gfx9_tiling_info_from_device(adev, tiling_info);
3964
3965         if (!IS_AMD_FMT_MOD(modifier))
3966                 return;
3967
3968         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3969         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
3970
3971         if (adev->family >= AMDGPU_FAMILY_NV) {
3972                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
3973         } else {
3974                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3975
3976                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
3977         }
3978 }
3979
3980 enum dm_micro_swizzle {
3981         MICRO_SWIZZLE_Z = 0,
3982         MICRO_SWIZZLE_S = 1,
3983         MICRO_SWIZZLE_D = 2,
3984         MICRO_SWIZZLE_R = 3
3985 };
3986
3987 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
3988                                           uint32_t format,
3989                                           uint64_t modifier)
3990 {
3991         struct amdgpu_device *adev = drm_to_adev(plane->dev);
3992         const struct drm_format_info *info = drm_format_info(format);
3993
3994         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
3995
3996         if (!info)
3997                 return false;
3998
3999         /*
4000          * We always have to allow this modifier, because core DRM still
4001          * checks LINEAR support if userspace does not provide modifiers.
4002          */
4003         if (modifier == DRM_FORMAT_MOD_LINEAR)
4004                 return true;
4005
4006         /*
4007          * The arbitrary tiling support for multiplane formats has not been hooked
4008          * up.
4009          */
4010         if (info->num_planes > 1)
4011                 return false;
4012
4013         /*
4014          * For D swizzle the canonical modifier depends on the bpp, so check
4015          * it here.
4016          */
4017         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4018             adev->family >= AMDGPU_FAMILY_NV) {
4019                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4020                         return false;
4021         }
4022
4023         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4024             info->cpp[0] < 8)
4025                 return false;
4026
4027         if (modifier_has_dcc(modifier)) {
4028                 /* Per radeonsi comments, 16/64 bpp are more complicated. */
4029                 if (info->cpp[0] != 4)
4030                         return false;
4031         }
4032
4033         return true;
4034 }
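
/*
 * Example outcomes (illustrative): DRM_FORMAT_MOD_LINEAR is accepted for
 * every format; a DCC modifier paired with a 32bpp format such as
 * DRM_FORMAT_XRGB8888 (cpp == 4) can pass, while the same modifier with a
 * 16 or 64 bpp format is rejected by the cpp[0] != 4 check above.
 */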
4035
4036 static void
4037 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4038 {
4039         if (!*mods)
4040                 return;
4041
4042         if (*cap - *size < 1) {
4043                 uint64_t new_cap = *cap * 2;
4044                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4045
4046                 if (!new_mods) {
4047                         kfree(*mods);
4048                         *mods = NULL;
4049                         return;
4050                 }
4051
4052                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4053                 kfree(*mods);
4054                 *mods = new_mods;
4055                 *cap = new_cap;
4056         }
4057
4058         (*mods)[*size] = mod;
4059         *size += 1;
4060 }
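
/*
 * add_modifier() grows the list geometrically and, on allocation failure,
 * frees it and sets *mods to NULL so that every later call degrades to a
 * no-op (the !*mods early return); callers therefore need only one final
 * NULL check. Typical use (illustrative sketch; the initial capacity is
 * arbitrary):
 *
 *	uint64_t *mods, size = 0, cap = 8;
 *
 *	mods = kmalloc(cap * sizeof(uint64_t), GFP_KERNEL);
 *	add_modifier(&mods, &size, &cap, DRM_FORMAT_MOD_LINEAR);
 *	...
 *	if (!mods)
 *		return -ENOMEM;
 */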
4061
4062 static void
4063 add_gfx9_modifiers(const struct amdgpu_device *adev,
4064                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4065 {
4066         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4067         int pipe_xor_bits = min(8, pipes +
4068                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4069         int bank_xor_bits = min(8 - pipe_xor_bits,
4070                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4071         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4072                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4073
4074
4075         if (adev->family == AMDGPU_FAMILY_RV) {
4076                 /* Raven2 and later */
4077                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4078
4079                 /*
4080                  * No _D DCC swizzles yet because we only allow 32bpp, which
4081                  * doesn't support _D on DCN
4082                  */
4083
4084                 if (has_constant_encode) {
4085                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4086                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4087                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4088                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4089                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4090                                     AMD_FMT_MOD_SET(DCC, 1) |
4091                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4092                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4093                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4094                 }
4095
4096                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4097                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4098                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4099                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4100                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4101                             AMD_FMT_MOD_SET(DCC, 1) |
4102                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4103                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4104                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4105
4106                 if (has_constant_encode) {
4107                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4108                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4109                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4110                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4111                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4112                                     AMD_FMT_MOD_SET(DCC, 1) |
4113                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4114                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4115                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4116
4117                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4118                                     AMD_FMT_MOD_SET(RB, rb) |
4119                                     AMD_FMT_MOD_SET(PIPE, pipes));
4120                 }
4121
4122                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4123                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4124                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4125                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4126                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4127                             AMD_FMT_MOD_SET(DCC, 1) |
4128                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4129                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4130                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4131                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4132                             AMD_FMT_MOD_SET(RB, rb) |
4133                             AMD_FMT_MOD_SET(PIPE, pipes));
4134         }
4135
4136         /*
4137          * Only supported for 64bpp on Raven, will be filtered on format in
4138          * dm_plane_format_mod_supported.
4139          */
4140         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4141                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4142                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4143                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4144                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4145
4146         if (adev->family == AMDGPU_FAMILY_RV) {
4147                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4148                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4149                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4150                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4151                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4152         }
4153
4154         /*
4155          * Only supported for 64bpp on Raven, will be filtered on format in
4156          * dm_plane_format_mod_supported.
4157          */
4158         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4159                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4160                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4161
4162         if (adev->family == AMDGPU_FAMILY_RV) {
4163                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4164                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4165                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4166         }
4167 }
4168
4169 static void
4170 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4171                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4172 {
4173         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4174
4175         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4176                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4177                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4178                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4179                     AMD_FMT_MOD_SET(DCC, 1) |
4180                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4181                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4182                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4183
4184         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4185                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4186                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4187                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4188                     AMD_FMT_MOD_SET(DCC, 1) |
4189                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4190                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4191                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4192                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4193
4194         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4195                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4196                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4197                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4198
4199         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4200                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4201                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4202                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4203
4205         /* Only supported for 64bpp; filtered by format in dm_plane_format_mod_supported() */
4206         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4207                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4208                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4209
4210         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4211                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4212                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4213 }
4214
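/*
 * GFX10.3 (RB+ parts such as Sienna Cichlid and Van Gogh) modifiers.
 * These add the PACKERS dimension and use DCC with independent 64B and
 * 128B blocks and a 128B max compressed block size.
 */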
4215 static void
4216 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4217                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4218 {
4219         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4220         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4221
4222         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4223                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4224                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4225                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4226                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4227                     AMD_FMT_MOD_SET(DCC, 1) |
4228                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4229                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4230                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4231                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4232
4233         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4234                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4235                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4236                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4237                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4238                     AMD_FMT_MOD_SET(DCC, 1) |
4239                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4240                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4241                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4242                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4243                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4244
4245         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4246                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4247                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4248                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4249                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4250
4251         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4252                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4253                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4254                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4255                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4256
4257         /* Only supported for 64bpp; filtered by format in dm_plane_format_mod_supported() */
4258         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4259                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4260                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4261
4262         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4263                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4264                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4265 }
4266
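/*
 * Build the modifier list advertised to userspace for a plane. Cursors
 * only get linear; other plane types get the family-specific tiled (and
 * DCC) modifiers plus linear. The returned list is terminated with
 * DRM_FORMAT_MOD_INVALID, and each composed entry has the form
 *
 *   AMD_FMT_MOD |
 *   AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
 *   AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)
 */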
4267 static int
4268 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4269 {
4270         uint64_t size = 0, capacity = 128;
4271         *mods = NULL;
4272
4273         /* We have not hooked up any pre-GFX9 modifiers. */
4274         if (adev->family < AMDGPU_FAMILY_AI)
4275                 return 0;
4276
4277         *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
4278
4279         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4280                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4281                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4282                 return *mods ? 0 : -ENOMEM;
4283         }
4284
4285         switch (adev->family) {
4286         case AMDGPU_FAMILY_AI:
4287         case AMDGPU_FAMILY_RV:
4288                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4289                 break;
4290         case AMDGPU_FAMILY_NV:
4291         case AMDGPU_FAMILY_VGH:
4292                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4293                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4294                 else
4295                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4296                 break;
4297         }
4298
4299         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4300
4301         /* INVALID marks the end of the list. */
4302         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4303
4304         if (!*mods)
4305                 return -ENOMEM;
4306
4307         return 0;
4308 }
4309
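/*
 * Derive DC tiling/DCC parameters from the framebuffer's format
 * modifier. For DCC modifiers the metadata surface is carried as
 * plane 1 of the framebuffer, so its address and pitch come from
 * offsets[1]/pitches[1].
 */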
4310 static int
4311 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4312                                           const struct amdgpu_framebuffer *afb,
4313                                           const enum surface_pixel_format format,
4314                                           const enum dc_rotation_angle rotation,
4315                                           const struct plane_size *plane_size,
4316                                           union dc_tiling_info *tiling_info,
4317                                           struct dc_plane_dcc_param *dcc,
4318                                           struct dc_plane_address *address,
4319                                           const bool force_disable_dcc)
4320 {
4321         const uint64_t modifier = afb->base.modifier;
4322         int ret;
4323
4324         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4325         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4326
4327         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4328                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4329
4330                 dcc->enable = 1;
4331                 dcc->meta_pitch = afb->base.pitches[1];
4332                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4333
4334                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4335                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4336         }
4337
4338         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4339         if (ret)
4340                 return ret;
4341
4342         return 0;
4343 }
4344
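/*
 * Translate the DRM framebuffer layout into DC's plane_size/address.
 * RGB formats use a single graphics address; video formats get separate
 * luma/chroma addressing with the chroma plane assumed 2x2 subsampled
 * (see the TODO below about deriving this from the surface format).
 */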
4345 static int
4346 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4347                              const struct amdgpu_framebuffer *afb,
4348                              const enum surface_pixel_format format,
4349                              const enum dc_rotation_angle rotation,
4350                              const uint64_t tiling_flags,
4351                              union dc_tiling_info *tiling_info,
4352                              struct plane_size *plane_size,
4353                              struct dc_plane_dcc_param *dcc,
4354                              struct dc_plane_address *address,
4355                              bool tmz_surface,
4356                              bool force_disable_dcc)
4357 {
4358         const struct drm_framebuffer *fb = &afb->base;
4359         int ret;
4360
4361         memset(tiling_info, 0, sizeof(*tiling_info));
4362         memset(plane_size, 0, sizeof(*plane_size));
4363         memset(dcc, 0, sizeof(*dcc));
4364         memset(address, 0, sizeof(*address));
4365
4366         address->tmz_surface = tmz_surface;
4367
4368         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4369                 uint64_t addr = afb->address + fb->offsets[0];
4370
4371                 plane_size->surface_size.x = 0;
4372                 plane_size->surface_size.y = 0;
4373                 plane_size->surface_size.width = fb->width;
4374                 plane_size->surface_size.height = fb->height;
4375                 plane_size->surface_pitch =
4376                         fb->pitches[0] / fb->format->cpp[0];
4377
4378                 address->type = PLN_ADDR_TYPE_GRAPHICS;
4379                 address->grph.addr.low_part = lower_32_bits(addr);
4380                 address->grph.addr.high_part = upper_32_bits(addr);
4381         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4382                 uint64_t luma_addr = afb->address + fb->offsets[0];
4383                 uint64_t chroma_addr = afb->address + fb->offsets[1];
4384
4385                 plane_size->surface_size.x = 0;
4386                 plane_size->surface_size.y = 0;
4387                 plane_size->surface_size.width = fb->width;
4388                 plane_size->surface_size.height = fb->height;
4389                 plane_size->surface_pitch =
4390                         fb->pitches[0] / fb->format->cpp[0];
4391
4392                 plane_size->chroma_size.x = 0;
4393                 plane_size->chroma_size.y = 0;
4394                 /* TODO: set these based on surface format */
4395                 plane_size->chroma_size.width = fb->width / 2;
4396                 plane_size->chroma_size.height = fb->height / 2;
4397
4398                 plane_size->chroma_pitch =
4399                         fb->pitches[1] / fb->format->cpp[1];
4400
4401                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4402                 address->video_progressive.luma_addr.low_part =
4403                         lower_32_bits(luma_addr);
4404                 address->video_progressive.luma_addr.high_part =
4405                         upper_32_bits(luma_addr);
4406                 address->video_progressive.chroma_addr.low_part =
4407                         lower_32_bits(chroma_addr);
4408                 address->video_progressive.chroma_addr.high_part =
4409                         upper_32_bits(chroma_addr);
4410         }
4411
4412         if (adev->family >= AMDGPU_FAMILY_AI) {
4413                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4414                                                                 rotation, plane_size,
4415                                                                 tiling_info, dcc,
4416                                                                 address,
4417                                                                 force_disable_dcc);
4418                 if (ret)
4419                         return ret;
4420         } else {
4421                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4422         }
4423
4424         return 0;
4425 }
4426
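/*
 * Map DRM blending state to DC. Per-pixel alpha is honored only for
 * overlay planes with premultiplied blending and an alpha-capable
 * format; the 16-bit DRM plane alpha is scaled down to DC's 8-bit
 * global alpha value.
 */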
4427 static void
4428 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4429                                bool *per_pixel_alpha, bool *global_alpha,
4430                                int *global_alpha_value)
4431 {
4432         *per_pixel_alpha = false;
4433         *global_alpha = false;
4434         *global_alpha_value = 0xff;
4435
4436         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4437                 return;
4438
4439         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4440                 static const uint32_t alpha_formats[] = {
4441                         DRM_FORMAT_ARGB8888,
4442                         DRM_FORMAT_RGBA8888,
4443                         DRM_FORMAT_ABGR8888,
4444                 };
4445                 uint32_t format = plane_state->fb->format->format;
4446                 unsigned int i;
4447
4448                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4449                         if (format == alpha_formats[i]) {
4450                                 *per_pixel_alpha = true;
4451                                 break;
4452                         }
4453                 }
4454         }
4455
4456         if (plane_state->alpha < 0xffff) {
4457                 *global_alpha = true;
4458                 *global_alpha_value = plane_state->alpha >> 8;
4459         }
4460 }
4461
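/*
 * Map the DRM color encoding/range properties to a DC color space for
 * YCbCr surfaces. Full-range BT.2020 is the only BT.2020 variant DC
 * exposes here, so limited-range BT.2020 is rejected with -EINVAL.
 */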
4462 static int
4463 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4464                             const enum surface_pixel_format format,
4465                             enum dc_color_space *color_space)
4466 {
4467         bool full_range;
4468
4469         *color_space = COLOR_SPACE_SRGB;
4470
4471         /* DRM color properties only affect non-RGB formats. */
4472         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4473                 return 0;
4474
4475         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4476
4477         switch (plane_state->color_encoding) {
4478         case DRM_COLOR_YCBCR_BT601:
4479                 if (full_range)
4480                         *color_space = COLOR_SPACE_YCBCR601;
4481                 else
4482                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4483                 break;
4484
4485         case DRM_COLOR_YCBCR_BT709:
4486                 if (full_range)
4487                         *color_space = COLOR_SPACE_YCBCR709;
4488                 else
4489                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4490                 break;
4491
4492         case DRM_COLOR_YCBCR_BT2020:
4493                 if (full_range)
4494                         *color_space = COLOR_SPACE_2020_YCBCR;
4495                 else
4496                         return -EINVAL;
4497                 break;
4498
4499         default:
4500                 return -EINVAL;
4501         }
4502
4503         return 0;
4504 }
4505
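/*
 * Aggregate the per-plane DC state that depends on format, rotation,
 * tiling and blending into plane_info and address. The DRM fourcc is
 * mapped to a DC surface format here; unsupported formats are rejected
 * with -EINVAL.
 */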
4506 static int
4507 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4508                             const struct drm_plane_state *plane_state,
4509                             const uint64_t tiling_flags,
4510                             struct dc_plane_info *plane_info,
4511                             struct dc_plane_address *address,
4512                             bool tmz_surface,
4513                             bool force_disable_dcc)
4514 {
4515         const struct drm_framebuffer *fb = plane_state->fb;
4516         const struct amdgpu_framebuffer *afb =
4517                 to_amdgpu_framebuffer(plane_state->fb);
4518         struct drm_format_name_buf format_name;
4519         int ret;
4520
4521         memset(plane_info, 0, sizeof(*plane_info));
4522
4523         switch (fb->format->format) {
4524         case DRM_FORMAT_C8:
4525                 plane_info->format =
4526                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4527                 break;
4528         case DRM_FORMAT_RGB565:
4529                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4530                 break;
4531         case DRM_FORMAT_XRGB8888:
4532         case DRM_FORMAT_ARGB8888:
4533                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4534                 break;
4535         case DRM_FORMAT_XRGB2101010:
4536         case DRM_FORMAT_ARGB2101010:
4537                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4538                 break;
4539         case DRM_FORMAT_XBGR2101010:
4540         case DRM_FORMAT_ABGR2101010:
4541                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4542                 break;
4543         case DRM_FORMAT_XBGR8888:
4544         case DRM_FORMAT_ABGR8888:
4545                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4546                 break;
4547         case DRM_FORMAT_NV21:
4548                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4549                 break;
4550         case DRM_FORMAT_NV12:
4551                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4552                 break;
4553         case DRM_FORMAT_P010:
4554                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4555                 break;
4556         case DRM_FORMAT_XRGB16161616F:
4557         case DRM_FORMAT_ARGB16161616F:
4558                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4559                 break;
4560         case DRM_FORMAT_XBGR16161616F:
4561         case DRM_FORMAT_ABGR16161616F:
4562                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4563                 break;
4564         default:
4565                 DRM_ERROR(
4566                         "Unsupported screen format %s\n",
4567                         drm_get_format_name(fb->format->format, &format_name));
4568                 return -EINVAL;
4569         }
4570
4571         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4572         case DRM_MODE_ROTATE_0:
4573                 plane_info->rotation = ROTATION_ANGLE_0;
4574                 break;
4575         case DRM_MODE_ROTATE_90:
4576                 plane_info->rotation = ROTATION_ANGLE_90;
4577                 break;
4578         case DRM_MODE_ROTATE_180:
4579                 plane_info->rotation = ROTATION_ANGLE_180;
4580                 break;
4581         case DRM_MODE_ROTATE_270:
4582                 plane_info->rotation = ROTATION_ANGLE_270;
4583                 break;
4584         default:
4585                 plane_info->rotation = ROTATION_ANGLE_0;
4586                 break;
4587         }
4588
4589         plane_info->visible = true;
4590         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4591
4592         plane_info->layer_index = 0;
4593
4594         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4595                                           &plane_info->color_space);
4596         if (ret)
4597                 return ret;
4598
4599         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4600                                            plane_info->rotation, tiling_flags,
4601                                            &plane_info->tiling_info,
4602                                            &plane_info->plane_size,
4603                                            &plane_info->dcc, address, tmz_surface,
4604                                            force_disable_dcc);
4605         if (ret)
4606                 return ret;
4607
4608         fill_blending_from_plane_state(
4609                 plane_state, &plane_info->per_pixel_alpha,
4610                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4611
4612         return 0;
4613 }
4614
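/*
 * Top-level per-plane fill: scaling, buffer/format attributes and the
 * input transfer function. Note that DCC is force-disabled on Raven
 * while entering suspend (see force_disable_dcc below).
 */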
4615 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4616                                     struct dc_plane_state *dc_plane_state,
4617                                     struct drm_plane_state *plane_state,
4618                                     struct drm_crtc_state *crtc_state)
4619 {
4620         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4621         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4622         struct dc_scaling_info scaling_info;
4623         struct dc_plane_info plane_info;
4624         int ret;
4625         bool force_disable_dcc = false;
4626
4627         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4628         if (ret)
4629                 return ret;
4630
4631         dc_plane_state->src_rect = scaling_info.src_rect;
4632         dc_plane_state->dst_rect = scaling_info.dst_rect;
4633         dc_plane_state->clip_rect = scaling_info.clip_rect;
4634         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4635
4636         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4637         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4638                                           afb->tiling_flags,
4639                                           &plane_info,
4640                                           &dc_plane_state->address,
4641                                           afb->tmz_surface,
4642                                           force_disable_dcc);
4643         if (ret)
4644                 return ret;
4645
4646         dc_plane_state->format = plane_info.format;
4647         dc_plane_state->color_space = plane_info.color_space;
4649         dc_plane_state->plane_size = plane_info.plane_size;
4650         dc_plane_state->rotation = plane_info.rotation;
4651         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4652         dc_plane_state->stereo_format = plane_info.stereo_format;
4653         dc_plane_state->tiling_info = plane_info.tiling_info;
4654         dc_plane_state->visible = plane_info.visible;
4655         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4656         dc_plane_state->global_alpha = plane_info.global_alpha;
4657         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4658         dc_plane_state->dcc = plane_info.dcc;
4659         dc_plane_state->layer_index = plane_info.layer_index; /* always returns 0 */
4660
4661         /*
4662          * Always set input transfer function, since plane state is refreshed
4663          * every time.
4664          */
4665         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4666         if (ret)
4667                 return ret;
4668
4669         return 0;
4670 }
4671
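/*
 * Compute the stream src/dst rects from the connector scaling property.
 * Example (RMX_ASPECT): a 1280x720 source on a 1920x1200 timing keeps
 * the aspect ratio, giving dst = 1920x1080 centered at (0, 60); the
 * optional underscan borders then shrink dst further.
 */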
4672 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4673                                            const struct dm_connector_state *dm_state,
4674                                            struct dc_stream_state *stream)
4675 {
4676         enum amdgpu_rmx_type rmx_type;
4677
4678         struct rect src = { 0 }; /* viewport in composition space */
4679         struct rect dst = { 0 }; /* stream addressable area */
4680
4681         /* no mode. nothing to be done */
4682         if (!mode)
4683                 return;
4684
4685         /* Full screen scaling by default */
4686         src.width = mode->hdisplay;
4687         src.height = mode->vdisplay;
4688         dst.width = stream->timing.h_addressable;
4689         dst.height = stream->timing.v_addressable;
4690
4691         if (dm_state) {
4692                 rmx_type = dm_state->scaling;
4693                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4694                         if (src.width * dst.height <
4695                                         src.height * dst.width) {
4696                                 /* height needs less upscaling/more downscaling */
4697                                 dst.width = src.width *
4698                                                 dst.height / src.height;
4699                         } else {
4700                                 /* width needs less upscaling/more downscaling */
4701                                 dst.height = src.height *
4702                                                 dst.width / src.width;
4703                         }
4704                 } else if (rmx_type == RMX_CENTER) {
4705                         dst = src;
4706                 }
4707
4708                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4709                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4710
4711                 if (dm_state->underscan_enable) {
4712                         dst.x += dm_state->underscan_hborder / 2;
4713                         dst.y += dm_state->underscan_vborder / 2;
4714                         dst.width -= dm_state->underscan_hborder;
4715                         dst.height -= dm_state->underscan_vborder;
4716                 }
4717         }
4718
4719         stream->src = src;
4720         stream->dst = dst;
4721
4722         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4723                         dst.x, dst.y, dst.width, dst.height);
4724
4725 }
4726
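/*
 * Pick the stream color depth from the sink's EDID. For YCbCr 4:2:0
 * the HDMI 2.0 HF-VSDB deep-color bits are authoritative; otherwise
 * the EDID bpc is used (8 if unspecified). The result is capped by the
 * requested max bpc and rounded down to an even value, since odd
 * component depths aren't defined for HDMI.
 */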
4727 static enum dc_color_depth
4728 convert_color_depth_from_display_info(const struct drm_connector *connector,
4729                                       bool is_y420, int requested_bpc)
4730 {
4731         uint8_t bpc;
4732
4733         if (is_y420) {
4734                 bpc = 8;
4735
4736                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4737                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4738                         bpc = 16;
4739                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4740                         bpc = 12;
4741                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4742                         bpc = 10;
4743         } else {
4744                 bpc = (uint8_t)connector->display_info.bpc;
4745                 /* Assume 8 bpc by default if no bpc is specified. */
4746                 bpc = bpc ? bpc : 8;
4747         }
4748
4749         if (requested_bpc > 0) {
4750                 /*
4751                  * Cap display bpc based on the user requested value.
4752                  *
4753                  * The value for state->max_bpc may not be correctly updated
4754                  * depending on when the connector gets added to the state
4755                  * or if this was called outside of atomic check, so it
4756                  * can't be used directly.
4757                  */
4758                 bpc = min_t(u8, bpc, requested_bpc);
4759
4760                 /* Round down to the nearest even number. */
4761                 bpc = bpc - (bpc & 1);
4762         }
4763
4764         switch (bpc) {
4765         case 0:
4766                 /*
4767                  * Temporary workaround: DRM doesn't parse color depth for
4768                  * EDID revisions before 1.4.
4769                  * TODO: Fix EDID parsing.
4770                  */
4771                 return COLOR_DEPTH_888;
4772         case 6:
4773                 return COLOR_DEPTH_666;
4774         case 8:
4775                 return COLOR_DEPTH_888;
4776         case 10:
4777                 return COLOR_DEPTH_101010;
4778         case 12:
4779                 return COLOR_DEPTH_121212;
4780         case 14:
4781                 return COLOR_DEPTH_141414;
4782         case 16:
4783                 return COLOR_DEPTH_161616;
4784         default:
4785                 return COLOR_DEPTH_UNDEFINED;
4786         }
4787 }
4788
4789 static enum dc_aspect_ratio
4790 get_aspect_ratio(const struct drm_display_mode *mode_in)
4791 {
4792         /* 1-1 mapping, since both enums follow the HDMI spec. */
4793         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4794 }
4795
4796 static enum dc_color_space
4797 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4798 {
4799         enum dc_color_space color_space = COLOR_SPACE_SRGB;
4800
4801         switch (dc_crtc_timing->pixel_encoding) {
4802         case PIXEL_ENCODING_YCBCR422:
4803         case PIXEL_ENCODING_YCBCR444:
4804         case PIXEL_ENCODING_YCBCR420:
4805         {
4806                 /*
4807                  * 27030 kHz is the separation point between HDTV and SDTV
4808                  * according to the HDMI spec; use YCbCr709 above it and
4809                  * YCbCr601 below it.
4810                  */
4811                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4812                         if (dc_crtc_timing->flags.Y_ONLY)
4813                                 color_space =
4814                                         COLOR_SPACE_YCBCR709_LIMITED;
4815                         else
4816                                 color_space = COLOR_SPACE_YCBCR709;
4817                 } else {
4818                         if (dc_crtc_timing->flags.Y_ONLY)
4819                                 color_space =
4820                                         COLOR_SPACE_YCBCR601_LIMITED;
4821                         else
4822                                 color_space = COLOR_SPACE_YCBCR601;
4823                 }
4824
4825         }
4826         break;
4827         case PIXEL_ENCODING_RGB:
4828                 color_space = COLOR_SPACE_SRGB;
4829                 break;
4830
4831         default:
4832                 WARN_ON(1);
4833                 break;
4834         }
4835
4836         return color_space;
4837 }
4838
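/*
 * Walk down from the requested color depth until the TMDS character
 * rate fits the sink's limit. The clock scales with depth per the HDMI
 * spec, e.g. a 300 MHz mode against a 340 MHz max TMDS clock:
 * 12 bpc -> 300 * 36/24 = 450 MHz (too high),
 * 10 bpc -> 300 * 30/24 = 375 MHz (too high),
 *  8 bpc -> 300 MHz, which fits.
 */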
4839 static bool adjust_colour_depth_from_display_info(
4840         struct dc_crtc_timing *timing_out,
4841         const struct drm_display_info *info)
4842 {
4843         enum dc_color_depth depth = timing_out->display_color_depth;
4844         int normalized_clk;
4845         do {
4846                 normalized_clk = timing_out->pix_clk_100hz / 10;
4847                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4848                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4849                         normalized_clk /= 2;
4850                 /* Adjust the pixel clock per the HDMI spec, based on the colour depth */
4851                 switch (depth) {
4852                 case COLOR_DEPTH_888:
4853                         break;
4854                 case COLOR_DEPTH_101010:
4855                         normalized_clk = (normalized_clk * 30) / 24;
4856                         break;
4857                 case COLOR_DEPTH_121212:
4858                         normalized_clk = (normalized_clk * 36) / 24;
4859                         break;
4860                 case COLOR_DEPTH_161616:
4861                         normalized_clk = (normalized_clk * 48) / 24;
4862                         break;
4863                 default:
4864                         /* The above depths are the only ones valid for HDMI. */
4865                         return false;
4866                 }
4867                 if (normalized_clk <= info->max_tmds_clock) {
4868                         timing_out->display_color_depth = depth;
4869                         return true;
4870                 }
4871         } while (--depth > COLOR_DEPTH_666);
4872         return false;
4873 }
4874
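/*
 * Fill the DC crtc timing from a DRM display mode. The pixel encoding
 * is chosen from the sink capabilities (with optional forced
 * YCbCr 4:2:0), and if the resulting depth doesn't fit the TMDS limit
 * the code falls back to 4:2:0 and retries the depth adjustment.
 */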
4875 static void fill_stream_properties_from_drm_display_mode(
4876         struct dc_stream_state *stream,
4877         const struct drm_display_mode *mode_in,
4878         const struct drm_connector *connector,
4879         const struct drm_connector_state *connector_state,
4880         const struct dc_stream_state *old_stream,
4881         int requested_bpc)
4882 {
4883         struct dc_crtc_timing *timing_out = &stream->timing;
4884         const struct drm_display_info *info = &connector->display_info;
4885         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4886         struct hdmi_vendor_infoframe hv_frame;
4887         struct hdmi_avi_infoframe avi_frame;
4888
4889         memset(&hv_frame, 0, sizeof(hv_frame));
4890         memset(&avi_frame, 0, sizeof(avi_frame));
4891
4892         timing_out->h_border_left = 0;
4893         timing_out->h_border_right = 0;
4894         timing_out->v_border_top = 0;
4895         timing_out->v_border_bottom = 0;
4896         /* TODO: un-hardcode */
4897         if (drm_mode_is_420_only(info, mode_in)
4898                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4899                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4900         else if (drm_mode_is_420_also(info, mode_in)
4901                         && aconnector->force_yuv420_output)
4902                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4903         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4904                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4905                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4906         else
4907                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4908
4909         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4910         timing_out->display_color_depth = convert_color_depth_from_display_info(
4911                 connector,
4912                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4913                 requested_bpc);
4914         timing_out->scan_type = SCANNING_TYPE_NODATA;
4915         timing_out->hdmi_vic = 0;
4916
4917         if (old_stream) {
4918                 timing_out->vic = old_stream->timing.vic;
4919                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4920                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4921         } else {
4922                 timing_out->vic = drm_match_cea_mode(mode_in);
4923                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4924                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4925                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4926                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4927         }
4928
4929         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4930                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4931                 timing_out->vic = avi_frame.video_code;
4932                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4933                 timing_out->hdmi_vic = hv_frame.vic;
4934         }
4935
4936         timing_out->h_addressable = mode_in->crtc_hdisplay;
4937         timing_out->h_total = mode_in->crtc_htotal;
4938         timing_out->h_sync_width =
4939                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4940         timing_out->h_front_porch =
4941                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4942         timing_out->v_total = mode_in->crtc_vtotal;
4943         timing_out->v_addressable = mode_in->crtc_vdisplay;
4944         timing_out->v_front_porch =
4945                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4946         timing_out->v_sync_width =
4947                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4948         timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4949         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4950
4951         stream->output_color_space = get_output_color_space(timing_out);
4952
4953         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4954         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4955         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4956                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4957                     drm_mode_is_420_also(info, mode_in) &&
4958                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4959                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4960                         adjust_colour_depth_from_display_info(timing_out, info);
4961                 }
4962         }
4963 }
4964
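/*
 * Copy the EDID-derived audio capabilities (short audio descriptors
 * for CEA revision 3+, speaker allocation and latency values) into
 * DC's audio_info.
 */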
4965 static void fill_audio_info(struct audio_info *audio_info,
4966                             const struct drm_connector *drm_connector,
4967                             const struct dc_sink *dc_sink)
4968 {
4969         int i = 0;
4970         int cea_revision = 0;
4971         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4972
4973         audio_info->manufacture_id = edid_caps->manufacturer_id;
4974         audio_info->product_id = edid_caps->product_id;
4975
4976         cea_revision = drm_connector->display_info.cea_rev;
4977
4978         strscpy(audio_info->display_name,
4979                 edid_caps->display_name,
4980                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4981
4982         if (cea_revision >= 3) {
4983                 audio_info->mode_count = edid_caps->audio_mode_count;
4984
4985                 for (i = 0; i < audio_info->mode_count; ++i) {
4986                         audio_info->modes[i].format_code =
4987                                         (enum audio_format_code)
4988                                         (edid_caps->audio_modes[i].format_code);
4989                         audio_info->modes[i].channel_count =
4990                                         edid_caps->audio_modes[i].channel_count;
4991                         audio_info->modes[i].sample_rates.all =
4992                                         edid_caps->audio_modes[i].sample_rate;
4993                         audio_info->modes[i].sample_size =
4994                                         edid_caps->audio_modes[i].sample_size;
4995                 }
4996         }
4997
4998         audio_info->flags.all = edid_caps->speaker_flags;
4999
5000         /* TODO: We only check the progressive mode; check the interlaced mode too */
5001         if (drm_connector->latency_present[0]) {
5002                 audio_info->video_latency = drm_connector->video_latency[0];
5003                 audio_info->audio_latency = drm_connector->audio_latency[0];
5004         }
5005
5006         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5007
5008 }
5009
5010 static void
5011 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5012                                       struct drm_display_mode *dst_mode)
5013 {
5014         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5015         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5016         dst_mode->crtc_clock = src_mode->crtc_clock;
5017         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5018         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5019         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5020         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5021         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5022         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5023         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5024         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5025         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5026         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5027         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5028 }
5029
5030 static void
5031 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5032                                         const struct drm_display_mode *native_mode,
5033                                         bool scale_enabled)
5034 {
5035         if (scale_enabled) {
5036                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5037         } else if (native_mode->clock == drm_mode->clock &&
5038                         native_mode->htotal == drm_mode->htotal &&
5039                         native_mode->vtotal == drm_mode->vtotal) {
5040                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5041         } else {
5042                 /* no scaling and no amdgpu-inserted mode, nothing to patch */
5043         }
5044 }
5045
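/*
 * Create a virtual sink so a stream can still be built when no
 * physical sink is attached to the link.
 */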
5046 static struct dc_sink *
5047 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5048 {
5049         struct dc_sink_init_data sink_init_data = { 0 };
5050         struct dc_sink *sink = NULL;

5051         sink_init_data.link = aconnector->dc_link;
5052         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5053
5054         sink = dc_sink_create(&sink_init_data);
5055         if (!sink) {
5056                 DRM_ERROR("Failed to create sink!\n");
5057                 return NULL;
5058         }
5059         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5060
5061         return sink;
5062 }
5063
5064 static void set_multisync_trigger_params(
5065                 struct dc_stream_state *stream)
5066 {
5067         if (stream->triggered_crtc_reset.enabled) {
5068                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5069                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5070         }
5071 }
5072
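/*
 * For multi-stream sync, elect the stream with the highest refresh
 * rate as the trigger master; every synchronized stream then resets
 * its CRTC on that master's event source.
 */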
5073 static void set_master_stream(struct dc_stream_state *stream_set[],
5074                               int stream_count)
5075 {
5076         int j, highest_rfr = 0, master_stream = 0;
5077
5078         for (j = 0;  j < stream_count; j++) {
5079                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5080                         int refresh_rate = 0;
5081
5082                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5083                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5084                         if (refresh_rate > highest_rfr) {
5085                                 highest_rfr = refresh_rate;
5086                                 master_stream = j;
5087                         }
5088                 }
5089         }
5090         for (j = 0;  j < stream_count; j++) {
5091                 if (stream_set[j])
5092                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5093         }
5094 }
5095
5096 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5097 {
5098         int i = 0;
5099
5100         if (context->stream_count < 2)
5101                 return;
5102         for (i = 0; i < context->stream_count; i++) {
5103                 if (!context->streams[i])
5104                         continue;
5105                 /*
5106                  * TODO: add a function to read AMD VSDB bits and set
5107                  * crtc_sync_master.multi_sync_enabled flag
5108                  * For now it's set to false
5109                  */
5110                 set_multisync_trigger_params(context->streams[i]);
5111         }
5112         set_master_stream(context->streams, context->stream_count);
5113 }
5114
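/*
 * Create and fill a dc_stream_state for the given connector and mode.
 * This is where timing patching against the preferred mode, the DSC
 * policy (including the debugfs overrides) and the PSR/VSC SDP setup
 * all come together. A fake sink is substituted when the connector has
 * no dc_sink attached.
 */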
5115 static struct dc_stream_state *
5116 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5117                        const struct drm_display_mode *drm_mode,
5118                        const struct dm_connector_state *dm_state,
5119                        const struct dc_stream_state *old_stream,
5120                        int requested_bpc)
5121 {
5122         struct drm_display_mode *preferred_mode = NULL;
5123         struct drm_connector *drm_connector;
5124         const struct drm_connector_state *con_state =
5125                 dm_state ? &dm_state->base : NULL;
5126         struct dc_stream_state *stream = NULL;
5127         struct drm_display_mode mode = *drm_mode;
5128         bool native_mode_found = false;
5129         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5130         int mode_refresh;
5131         int preferred_refresh = 0;
5132 #if defined(CONFIG_DRM_AMD_DC_DCN)
5133         struct dsc_dec_dpcd_caps dsc_caps;
5134         uint32_t link_bandwidth_kbps;
5135 #endif
5136         struct dc_sink *sink = NULL;

5137         if (!aconnector) {
5138                 DRM_ERROR("aconnector is NULL!\n");
5139                 return stream;
5140         }
5141
5142         drm_connector = &aconnector->base;
5143
5144         if (!aconnector->dc_sink) {
5145                 sink = create_fake_sink(aconnector);
5146                 if (!sink)
5147                         return stream;
5148         } else {
5149                 sink = aconnector->dc_sink;
5150                 dc_sink_retain(sink);
5151         }
5152
5153         stream = dc_create_stream_for_sink(sink);
5154
5155         if (stream == NULL) {
5156                 DRM_ERROR("Failed to create stream for sink!\n");
5157                 goto finish;
5158         }
5159
5160         stream->dm_stream_context = aconnector;
5161
5162         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5163                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5164
5165         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5166                 /* Search for preferred mode */
5167                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5168                         native_mode_found = true;
5169                         break;
5170                 }
5171         }
5172         if (!native_mode_found)
5173                 preferred_mode = list_first_entry_or_null(
5174                                 &aconnector->base.modes,
5175                                 struct drm_display_mode,
5176                                 head);
5177
5178         mode_refresh = drm_mode_vrefresh(&mode);
5179
5180         if (preferred_mode == NULL) {
5181                 /*
5182                  * This may not be an error: the use case is when we have no
5183                  * usermode calls to reset and set mode upon hotplug. In this
5184                  * case, we call set mode ourselves to restore the previous mode,
5185                  * and the mode list may not be filled in yet.
5186                  */
5187                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5188         } else {
5189                 decide_crtc_timing_for_drm_display_mode(
5190                                 &mode, preferred_mode,
5191                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5192                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5193         }
5194
5195         if (!dm_state)
5196                 drm_mode_set_crtcinfo(&mode, 0);
5197
5198         /*
5199          * If scaling is enabled and the refresh rate didn't change,
5200          * copy the VIC and sync polarities from the old timing.
5201          */
5202         if (!scale || mode_refresh != preferred_refresh)
5203                 fill_stream_properties_from_drm_display_mode(stream,
5204                         &mode, &aconnector->base, con_state, NULL, requested_bpc);
5205         else
5206                 fill_stream_properties_from_drm_display_mode(stream,
5207                         &mode, &aconnector->base, con_state, old_stream, requested_bpc);
5208
5209         stream->timing.flags.DSC = 0;
5210
5211         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5212 #if defined(CONFIG_DRM_AMD_DC_DCN)
5213                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5214                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5215                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5216                                       &dsc_caps);
5217                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5218                                                              dc_link_get_link_cap(aconnector->dc_link));
5219
5220                 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5221                         /* Set DSC policy according to dsc_clock_en */
5222                         dc_dsc_policy_set_enable_dsc_when_not_needed(
5223                                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5224
5225                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5226                                                   &dsc_caps,
5227                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5228                                                   0,
5229                                                   link_bandwidth_kbps,
5230                                                   &stream->timing,
5231                                                   &stream->timing.dsc_cfg))
5232                                 stream->timing.flags.DSC = 1;
5233                         /* Overwrite the stream flag if DSC is enabled through debugfs */
5234                         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5235                                 stream->timing.flags.DSC = 1;
5236
5237                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5238                                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5239
5240                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5241                                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5242
5243                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5244                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5245                 }
5246 #endif
5247         }
5248
5249         update_stream_scaling_settings(&mode, dm_state, stream);
5250
5251         fill_audio_info(
5252                 &stream->audio_info,
5253                 drm_connector,
5254                 sink);
5255
5256         update_stream_signal(stream, sink);
5257
5258         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5259                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5260
5261         if (stream->link->psr_settings.psr_feature_enabled) {
5262                 /*
5263                  * Decide whether the stream supports VSC SDP colorimetry
5264                  * before building the VSC info packet.
5265                  */
5266                 stream->use_vsc_sdp_for_colorimetry = false;
5267                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5268                         stream->use_vsc_sdp_for_colorimetry =
5269                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5270                 } else {
5271                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5272                                 stream->use_vsc_sdp_for_colorimetry = true;
5273                 }
5274                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5275         }
5276 finish:
5277         dc_sink_release(sink);
5278
5279         return stream;
5280 }
5281
5282 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5283 {
5284         drm_crtc_cleanup(crtc);
5285         kfree(crtc);
5286 }
5287
5288 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5289                                   struct drm_crtc_state *state)
5290 {
5291         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5292
5293         /* TODO: Destroy dc_stream objects once the stream object is flattened */
5294         if (cur->stream)
5295                 dc_stream_release(cur->stream);
5297
5298         __drm_atomic_helper_crtc_destroy_state(state);
5299
5301         kfree(state);
5302 }
5303
5304 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5305 {
5306         struct dm_crtc_state *state;
5307
5308         if (crtc->state)
5309                 dm_crtc_destroy_state(crtc, crtc->state);
5310
5311         state = kzalloc(sizeof(*state), GFP_KERNEL);
5312         if (WARN_ON(!state))
5313                 return;
5314
5315         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5316 }
5317
5318 static struct drm_crtc_state *
5319 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5320 {
5321         struct dm_crtc_state *state, *cur;
5322
5323         if (WARN_ON(!crtc->state))
5324                 return NULL;
5325
5326         cur = to_dm_crtc_state(crtc->state);
5327
5328         state = kzalloc(sizeof(*state), GFP_KERNEL);
5329         if (!state)
5330                 return NULL;
5331
5332         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5333
5334         if (cur->stream) {
5335                 state->stream = cur->stream;
5336                 dc_stream_retain(state->stream);
5337         }
5338
5339         state->active_planes = cur->active_planes;
5340         state->vrr_infopacket = cur->vrr_infopacket;
5341         state->abm_level = cur->abm_level;
5342         state->vrr_supported = cur->vrr_supported;
5343         state->freesync_config = cur->freesync_config;
5344         state->crc_src = cur->crc_src;
5345         state->cm_has_degamma = cur->cm_has_degamma;
5346         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5347
5348         /* TODO: Duplicate the dc_stream once the stream object is flattened */
5349
5350         return &state->base;
5351 }
5352
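/*
 * Enable/disable the VUPDATE interrupt for a CRTC. Only needed while
 * VRR is active; see dm_set_vblank() below.
 */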
5353 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5354 {
5355         enum dc_irq_source irq_source;
5356         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5357         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5358         int rc;
5359
5360         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5361
5362         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5363
5364         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5365                          acrtc->crtc_id, enable ? "en" : "dis", rc);
5366         return rc;
5367 }
5368
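/*
 * Toggle the CRTC's vblank interrupt. In VRR mode the VUPDATE
 * interrupt is slaved to vblank here, and on DCN the active-vblank
 * count gates the MALL idle optimizations: they are only allowed when
 * no CRTC has vblank interrupts enabled.
 */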
5369 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5370 {
5371         enum dc_irq_source irq_source;
5372         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5373         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5374         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5375         struct amdgpu_display_manager *dm = &adev->dm;
5376         int rc = 0;
5377
5378         if (enable) {
5379                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5380                 if (amdgpu_dm_vrr_active(acrtc_state))
5381                         rc = dm_set_vupdate_irq(crtc, true);
5382         } else {
5383                 /* vblank irq off -> vupdate irq off */
5384                 rc = dm_set_vupdate_irq(crtc, false);
5385         }
5386
5387         if (rc)
5388                 return rc;
5389
5390         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5391
5392         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5393                 return -EBUSY;
5394
5395 #if defined(CONFIG_DRM_AMD_DC_DCN)
5396         if (amdgpu_in_reset(adev))
5397                 return 0;
5398
5399         mutex_lock(&dm->dc_lock);
5400
5401         if (enable)
5402                 dm->active_vblank_irq_count++;
5403         else
5404                 dm->active_vblank_irq_count--;
5405
5407         dc_allow_idle_optimizations(adev->dm.dc,
5408                                     dm->active_vblank_irq_count == 0);
5409
5410         DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n",
5411                          dm->active_vblank_irq_count == 0);
5412
5413         mutex_unlock(&dm->dc_lock);
5414
5415 #endif
5416         return 0;
5417 }
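/*
 * active_vblank_irq_count acts as a rough "some CRTC is being waited
 * on" counter: idle optimizations such as MALL on DCN3.x parts are
 * only allowed once every CRTC has disabled its vblank interrupt
 * again, and are revoked as soon as the first one re-enables it.
 */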
5418
5419 static int dm_enable_vblank(struct drm_crtc *crtc)
5420 {
5421         return dm_set_vblank(crtc, true);
5422 }
5423
5424 static void dm_disable_vblank(struct drm_crtc *crtc)
5425 {
5426         dm_set_vblank(crtc, false);
5427 }
5428
5429 /* Implement only the options currently available for the driver */
5430 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5431         .reset = dm_crtc_reset_state,
5432         .destroy = amdgpu_dm_crtc_destroy,
5433         .set_config = drm_atomic_helper_set_config,
5434         .page_flip = drm_atomic_helper_page_flip,
5435         .atomic_duplicate_state = dm_crtc_duplicate_state,
5436         .atomic_destroy_state = dm_crtc_destroy_state,
5437         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5438         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5439         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5440         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5441         .enable_vblank = dm_enable_vblank,
5442         .disable_vblank = dm_disable_vblank,
5443         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5444 };
5445
5446 static enum drm_connector_status
5447 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5448 {
5449         bool connected;
5450         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5451
5452         /*
5453          * Notes:
5454          * 1. This interface is NOT called in context of HPD irq.
5455          * 2. This interface *is called* in the context of a user-mode ioctl,
5456          *    which makes it a bad place for *any* MST-related activity.
5457          */
5458
5459         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5460             !aconnector->fake_enable)
5461                 connected = (aconnector->dc_sink != NULL);
5462         else
5463                 connected = (aconnector->base.force == DRM_FORCE_ON);
5464
5465         update_subconnector_property(aconnector);
5466
5467         return (connected ? connector_status_connected :
5468                         connector_status_disconnected);
5469 }
5470
5471 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5472                                             struct drm_connector_state *connector_state,
5473                                             struct drm_property *property,
5474                                             uint64_t val)
5475 {
5476         struct drm_device *dev = connector->dev;
5477         struct amdgpu_device *adev = drm_to_adev(dev);
5478         struct dm_connector_state *dm_old_state =
5479                 to_dm_connector_state(connector->state);
5480         struct dm_connector_state *dm_new_state =
5481                 to_dm_connector_state(connector_state);
5482
5483         int ret = -EINVAL;
5484
5485         if (property == dev->mode_config.scaling_mode_property) {
5486                 enum amdgpu_rmx_type rmx_type;
5487
5488                 switch (val) {
5489                 case DRM_MODE_SCALE_CENTER:
5490                         rmx_type = RMX_CENTER;
5491                         break;
5492                 case DRM_MODE_SCALE_ASPECT:
5493                         rmx_type = RMX_ASPECT;
5494                         break;
5495                 case DRM_MODE_SCALE_FULLSCREEN:
5496                         rmx_type = RMX_FULL;
5497                         break;
5498                 case DRM_MODE_SCALE_NONE:
5499                 default:
5500                         rmx_type = RMX_OFF;
5501                         break;
5502                 }
5503
5504                 if (dm_old_state->scaling == rmx_type)
5505                         return 0;
5506
5507                 dm_new_state->scaling = rmx_type;
5508                 ret = 0;
5509         } else if (property == adev->mode_info.underscan_hborder_property) {
5510                 dm_new_state->underscan_hborder = val;
5511                 ret = 0;
5512         } else if (property == adev->mode_info.underscan_vborder_property) {
5513                 dm_new_state->underscan_vborder = val;
5514                 ret = 0;
5515         } else if (property == adev->mode_info.underscan_property) {
5516                 dm_new_state->underscan_enable = val;
5517                 ret = 0;
5518         } else if (property == adev->mode_info.abm_level_property) {
5519                 dm_new_state->abm_level = val;
5520                 ret = 0;
5521         }
5522
5523         return ret;
5524 }
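/*
 * These property handlers are reached through the atomic ioctl; as an
 * illustrative example only (property IDs are not fixed and must be
 * discovered at runtime with drmModeObjectGetProperties()), userspace
 * could enable underscan via libdrm with:
 *
 *	drmModeObjectSetProperty(fd, connector_id,
 *				 DRM_MODE_OBJECT_CONNECTOR,
 *				 underscan_prop_id, 1);
 */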
5525
5526 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5527                                             const struct drm_connector_state *state,
5528                                             struct drm_property *property,
5529                                             uint64_t *val)
5530 {
5531         struct drm_device *dev = connector->dev;
5532         struct amdgpu_device *adev = drm_to_adev(dev);
5533         struct dm_connector_state *dm_state =
5534                 to_dm_connector_state(state);
5535         int ret = -EINVAL;
5536
5537         if (property == dev->mode_config.scaling_mode_property) {
5538                 switch (dm_state->scaling) {
5539                 case RMX_CENTER:
5540                         *val = DRM_MODE_SCALE_CENTER;
5541                         break;
5542                 case RMX_ASPECT:
5543                         *val = DRM_MODE_SCALE_ASPECT;
5544                         break;
5545                 case RMX_FULL:
5546                         *val = DRM_MODE_SCALE_FULLSCREEN;
5547                         break;
5548                 case RMX_OFF:
5549                 default:
5550                         *val = DRM_MODE_SCALE_NONE;
5551                         break;
5552                 }
5553                 ret = 0;
5554         } else if (property == adev->mode_info.underscan_hborder_property) {
5555                 *val = dm_state->underscan_hborder;
5556                 ret = 0;
5557         } else if (property == adev->mode_info.underscan_vborder_property) {
5558                 *val = dm_state->underscan_vborder;
5559                 ret = 0;
5560         } else if (property == adev->mode_info.underscan_property) {
5561                 *val = dm_state->underscan_enable;
5562                 ret = 0;
5563         } else if (property == adev->mode_info.abm_level_property) {
5564                 *val = dm_state->abm_level;
5565                 ret = 0;
5566         }
5567
5568         return ret;
5569 }
5570
5571 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5572 {
5573         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5574
5575         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5576 }
5577
5578 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5579 {
5580         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5581         const struct dc_link *link = aconnector->dc_link;
5582         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5583         struct amdgpu_display_manager *dm = &adev->dm;
5584
5585         /*
5586          * Call this only if mst_mgr was initialized before, since it's not
5587          * done for all connector types.
5588          */
5589         if (aconnector->mst_mgr.dev)
5590                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5591
5592 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5593         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5594
5595         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5596             link->type != dc_connection_none &&
5597             dm->backlight_dev) {
5598                 backlight_device_unregister(dm->backlight_dev);
5599                 dm->backlight_dev = NULL;
5600         }
5601 #endif
5602
5603         if (aconnector->dc_em_sink)
5604                 dc_sink_release(aconnector->dc_em_sink);
5605         aconnector->dc_em_sink = NULL;
5606         if (aconnector->dc_sink)
5607                 dc_sink_release(aconnector->dc_sink);
5608         aconnector->dc_sink = NULL;
5609
5610         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5611         drm_connector_unregister(connector);
5612         drm_connector_cleanup(connector);
5613         if (aconnector->i2c) {
5614                 i2c_del_adapter(&aconnector->i2c->base);
5615                 kfree(aconnector->i2c);
5616         }
5617         kfree(aconnector->dm_dp_aux.aux.name);
5618
5619         kfree(connector);
5620 }
5621
5622 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5623 {
5624         struct dm_connector_state *state =
5625                 to_dm_connector_state(connector->state);
5626
5627         if (connector->state)
5628                 __drm_atomic_helper_connector_destroy_state(connector->state);
5629
5630         kfree(state);
5631
5632         state = kzalloc(sizeof(*state), GFP_KERNEL);
5633
5634         if (state) {
5635                 state->scaling = RMX_OFF;
5636                 state->underscan_enable = false;
5637                 state->underscan_hborder = 0;
5638                 state->underscan_vborder = 0;
5639                 state->base.max_requested_bpc = 8;
5640                 state->vcpi_slots = 0;
5641                 state->pbn = 0;
5642                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5643                         state->abm_level = amdgpu_dm_abm_level;
5644
5645                 __drm_atomic_helper_connector_reset(connector, &state->base);
5646         }
5647 }
5648
5649 struct drm_connector_state *
5650 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5651 {
5652         struct dm_connector_state *state =
5653                 to_dm_connector_state(connector->state);
5654
5655         struct dm_connector_state *new_state =
5656                         kmemdup(state, sizeof(*state), GFP_KERNEL);
5657
5658         if (!new_state)
5659                 return NULL;
5660
5661         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5662
5663         new_state->freesync_capable = state->freesync_capable;
5664         new_state->abm_level = state->abm_level;
5665         new_state->scaling = state->scaling;
5666         new_state->underscan_enable = state->underscan_enable;
5667         new_state->underscan_hborder = state->underscan_hborder;
5668         new_state->underscan_vborder = state->underscan_vborder;
5669         new_state->vcpi_slots = state->vcpi_slots;
5670         new_state->pbn = state->pbn;
5671         return &new_state->base;
5672 }
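/*
 * kmemdup() above already copies every driver-private field of the
 * state; __drm_atomic_helper_connector_duplicate_state() then fixes up
 * the embedded drm_connector_state (reference counts, blob pointers,
 * etc.), and the explicit member assignments mostly re-state the copy
 * for readability.
 */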
5673
5674 static int
5675 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5676 {
5677         struct amdgpu_dm_connector *amdgpu_dm_connector =
5678                 to_amdgpu_dm_connector(connector);
5679         int r;
5680
5681         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5682             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5683                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5684                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5685                 if (r)
5686                         return r;
5687         }
5688
5689 #if defined(CONFIG_DEBUG_FS)
5690         connector_debugfs_init(amdgpu_dm_connector);
5691 #endif
5692
5693         return 0;
5694 }
5695
5696 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5697         .reset = amdgpu_dm_connector_funcs_reset,
5698         .detect = amdgpu_dm_connector_detect,
5699         .fill_modes = drm_helper_probe_single_connector_modes,
5700         .destroy = amdgpu_dm_connector_destroy,
5701         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5702         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5703         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5704         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5705         .late_register = amdgpu_dm_connector_late_register,
5706         .early_unregister = amdgpu_dm_connector_unregister
5707 };
5708
5709 static int get_modes(struct drm_connector *connector)
5710 {
5711         return amdgpu_dm_connector_get_modes(connector);
5712 }
5713
5714 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5715 {
5716         struct dc_sink_init_data init_params = {
5717                         .link = aconnector->dc_link,
5718                         .sink_signal = SIGNAL_TYPE_VIRTUAL
5719         };
5720         struct edid *edid;
5721
5722         if (!aconnector->base.edid_blob_ptr) {
5723                 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5724                                 aconnector->base.name);
5725
5726                 aconnector->base.force = DRM_FORCE_OFF;
5727                 aconnector->base.override_edid = false;
5728                 return;
5729         }
5730
5731         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5732
5733         aconnector->edid = edid;
5734
5735         aconnector->dc_em_sink = dc_link_add_remote_sink(
5736                 aconnector->dc_link,
5737                 (uint8_t *)edid,
5738                 (edid->extensions + 1) * EDID_LENGTH,
5739                 &init_params);
5740
5741         if (aconnector->base.force == DRM_FORCE_ON) {
5742                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5743                 aconnector->dc_link->local_sink :
5744                 aconnector->dc_em_sink;
5745                 dc_sink_retain(aconnector->dc_sink);
5746         }
5747 }
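/*
 * EDID sizing used above: EDID_LENGTH is one 128-byte block and
 * edid->extensions counts the extension blocks that follow, so the
 * payload handed to dc_link_add_remote_sink() is:
 *
 *	(edid->extensions + 1) * EDID_LENGTH
 *
 * e.g. a typical EDID with one CEA extension -> (1 + 1) * 128 = 256.
 */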
5748
5749 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5750 {
5751         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5752
5753         /*
5754          * In case of a headless boot with a force-on DP managed connector,
5755          * these settings have to be != 0 to get an initial modeset.
5756          */
5757         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5758                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5759                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5760         }
5761
5762
5763         aconnector->base.override_edid = true;
5764         create_eml_sink(aconnector);
5765 }
5766
5767 static struct dc_stream_state *
5768 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5769                                 const struct drm_display_mode *drm_mode,
5770                                 const struct dm_connector_state *dm_state,
5771                                 const struct dc_stream_state *old_stream)
5772 {
5773         struct drm_connector *connector = &aconnector->base;
5774         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5775         struct dc_stream_state *stream;
5776         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5777         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5778         enum dc_status dc_result = DC_OK;
5779
5780         do {
5781                 stream = create_stream_for_sink(aconnector, drm_mode,
5782                                                 dm_state, old_stream,
5783                                                 requested_bpc);
5784                 if (stream == NULL) {
5785                         DRM_ERROR("Failed to create stream for sink!\n");
5786                         break;
5787                 }
5788
5789                 dc_result = dc_validate_stream(adev->dm.dc, stream);
5790
5791                 if (dc_result != DC_OK) {
5792                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5793                                       drm_mode->hdisplay,
5794                                       drm_mode->vdisplay,
5795                                       drm_mode->clock,
5796                                       dc_result,
5797                                       dc_status_to_str(dc_result));
5798
5799                         dc_stream_release(stream);
5800                         stream = NULL;
5801                         requested_bpc -= 2; /* lower bpc to retry validation */
5802                 }
5803
5804         } while (stream == NULL && requested_bpc >= 6);
5805
5806         return stream;
5807 }
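/*
 * The validation loop above lowers the colour depth two bits at a time
 * until DC accepts the stream, so a mode requested at 10 bpc is tried
 * as 10 -> 8 -> 6 before giving up; 6 bpc (COLOR_DEPTH_666) is the
 * lowest depth DC supports.
 */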
5808
5809 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5810                                    struct drm_display_mode *mode)
5811 {
5812         int result = MODE_ERROR;
5813         struct dc_sink *dc_sink;
5814         /* TODO: Unhardcode stream count */
5815         struct dc_stream_state *stream;
5816         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5817
5818         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5819                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5820                 return result;
5821
5822         /*
5823          * Only run this the first time mode_valid is called, to initialize
5824          * EDID management.
5825          */
5826         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5827                 !aconnector->dc_em_sink)
5828                 handle_edid_mgmt(aconnector);
5829
5830         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5831
5832         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5833                                 aconnector->base.force != DRM_FORCE_ON) {
5834                 DRM_ERROR("dc_sink is NULL!\n");
5835                 goto fail;
5836         }
5837
5838         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5839         if (stream) {
5840                 dc_stream_release(stream);
5841                 result = MODE_OK;
5842         }
5843
5844 fail:
5845         /* TODO: error handling */
5846         return result;
5847 }
5848
5849 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5850                                 struct dc_info_packet *out)
5851 {
5852         struct hdmi_drm_infoframe frame;
5853         unsigned char buf[30]; /* 26 + 4 */
5854         ssize_t len;
5855         int ret, i;
5856
5857         memset(out, 0, sizeof(*out));
5858
5859         if (!state->hdr_output_metadata)
5860                 return 0;
5861
5862         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5863         if (ret)
5864                 return ret;
5865
5866         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5867         if (len < 0)
5868                 return (int)len;
5869
5870         /* Static metadata is a fixed 26 bytes + 4 byte header. */
5871         if (len != 30)
5872                 return -EINVAL;
5873
5874         /* Prepare the infopacket for DC. */
5875         switch (state->connector->connector_type) {
5876         case DRM_MODE_CONNECTOR_HDMIA:
5877                 out->hb0 = 0x87; /* type */
5878                 out->hb1 = 0x01; /* version */
5879                 out->hb2 = 0x1A; /* length */
5880                 out->sb[0] = buf[3]; /* checksum */
5881                 i = 1;
5882                 break;
5883
5884         case DRM_MODE_CONNECTOR_DisplayPort:
5885         case DRM_MODE_CONNECTOR_eDP:
5886                 out->hb0 = 0x00; /* sdp id, zero */
5887                 out->hb1 = 0x87; /* type */
5888                 out->hb2 = 0x1D; /* payload len - 1 */
5889                 out->hb3 = (0x13 << 2); /* sdp version */
5890                 out->sb[0] = 0x01; /* version */
5891                 out->sb[1] = 0x1A; /* length */
5892                 i = 2;
5893                 break;
5894
5895         default:
5896                 return -EINVAL;
5897         }
5898
5899         memcpy(&out->sb[i], &buf[4], 26);
5900         out->valid = true;
5901
5902         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5903                        sizeof(out->sb), false);
5904
5905         return 0;
5906 }
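/*
 * Layout of the buffer packed by hdmi_drm_infoframe_pack_only(), which
 * explains the fixed offsets used above:
 *
 *	buf[0]      type     (0x87)
 *	buf[1]      version  (0x01)
 *	buf[2]      length   (0x1A == 26)
 *	buf[3]      checksum
 *	buf[4..29]  26 bytes of static HDR metadata (EOTF, primaries, ...)
 *
 * HDMI carries the checksum in its payload; DP SDPs rebuild their own
 * header instead, which is why the copy always starts at &buf[4].
 */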
5907
5908 static bool
5909 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5910                           const struct drm_connector_state *new_state)
5911 {
5912         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5913         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5914
5915         if (old_blob != new_blob) {
5916                 if (old_blob && new_blob &&
5917                     old_blob->length == new_blob->length)
5918                         return memcmp(old_blob->data, new_blob->data,
5919                                       old_blob->length);
5920
5921                 return true;
5922         }
5923
5924         return false;
5925 }
5926
5927 static int
5928 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5929                                  struct drm_atomic_state *state)
5930 {
5931         struct drm_connector_state *new_con_state =
5932                 drm_atomic_get_new_connector_state(state, conn);
5933         struct drm_connector_state *old_con_state =
5934                 drm_atomic_get_old_connector_state(state, conn);
5935         struct drm_crtc *crtc = new_con_state->crtc;
5936         struct drm_crtc_state *new_crtc_state;
5937         int ret;
5938
5939         trace_amdgpu_dm_connector_atomic_check(new_con_state);
5940
5941         if (!crtc)
5942                 return 0;
5943
5944         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5945                 struct dc_info_packet hdr_infopacket;
5946
5947                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5948                 if (ret)
5949                         return ret;
5950
5951                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5952                 if (IS_ERR(new_crtc_state))
5953                         return PTR_ERR(new_crtc_state);
5954
5955                 /*
5956                  * DC considers the stream backends changed if the
5957                  * static metadata changes. Forcing the modeset also
5958                  * gives a simple way for userspace to switch from
5959                  * 8bpc to 10bpc when setting the metadata to enter
5960                  * or exit HDR.
5961                  *
5962                  * Changing the static metadata after it's been
5963                  * set is permissible, however. So only force a
5964                  * modeset if we're entering or exiting HDR.
5965                  */
5966                 new_crtc_state->mode_changed =
5967                         !old_con_state->hdr_output_metadata ||
5968                         !new_con_state->hdr_output_metadata;
5969         }
5970
5971         return 0;
5972 }
5973
5974 static const struct drm_connector_helper_funcs
5975 amdgpu_dm_connector_helper_funcs = {
5976         /*
5977          * When hotplugging a second, bigger display in fbcon mode, the
5978          * higher-resolution modes are filtered out by drm_mode_validate_size()
5979          * and go missing once the user starts lightdm. So the get_modes
5980          * callback has to rebuild the mode list, not just return the mode count.
5981          */
5982         .get_modes = get_modes,
5983         .mode_valid = amdgpu_dm_connector_mode_valid,
5984         .atomic_check = amdgpu_dm_connector_atomic_check,
5985 };
5986
5987 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5988 {
5989 }
5990
5991 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5992 {
5993         struct drm_atomic_state *state = new_crtc_state->state;
5994         struct drm_plane *plane;
5995         int num_active = 0;
5996
5997         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5998                 struct drm_plane_state *new_plane_state;
5999
6000                 /* Cursor planes are "fake". */
6001                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6002                         continue;
6003
6004                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6005
6006                 if (!new_plane_state) {
6007                         /*
6008                          * The plane is enabled on the CRTC and hasn't
6009                          * changed state, which means it previously passed
6010                          * validation and is therefore still enabled.
6011                          */
6012                         num_active += 1;
6013                         continue;
6014                 }
6015
6016                 /* We need a framebuffer to be considered enabled. */
6017                 num_active += (new_plane_state->fb != NULL);
6018         }
6019
6020         return num_active;
6021 }
6022
6023 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6024                                          struct drm_crtc_state *new_crtc_state)
6025 {
6026         struct dm_crtc_state *dm_new_crtc_state =
6027                 to_dm_crtc_state(new_crtc_state);
6028
6029         dm_new_crtc_state->active_planes = 0;
6030
6031         if (!dm_new_crtc_state->stream)
6032                 return;
6033
6034         dm_new_crtc_state->active_planes =
6035                 count_crtc_active_planes(new_crtc_state);
6036 }
6037
6038 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6039                                        struct drm_atomic_state *state)
6040 {
6041         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6042                                                                           crtc);
6043         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6044         struct dc *dc = adev->dm.dc;
6045         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6046         int ret = -EINVAL;
6047
6048         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6049
6050         dm_update_crtc_active_planes(crtc, crtc_state);
6051
6052         if (unlikely(!dm_crtc_state->stream &&
6053                      modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6054                 WARN_ON(1);
6055                 return ret;
6056         }
6057
6058         /*
6059          * We require the primary plane to be enabled whenever the CRTC is, otherwise
6060          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6061          * planes are disabled, which is not supported by the hardware. And there is legacy
6062          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6063          */
6064         if (crtc_state->enable &&
6065             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6066                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6067                 return -EINVAL;
6068         }
6069
6070         /* In some use cases, like reset, no stream is attached */
6071         if (!dm_crtc_state->stream)
6072                 return 0;
6073
6074         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6075                 return 0;
6076
6077         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6078         return ret;
6079 }
6080
6081 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6082                                       const struct drm_display_mode *mode,
6083                                       struct drm_display_mode *adjusted_mode)
6084 {
6085         return true;
6086 }
6087
6088 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6089         .disable = dm_crtc_helper_disable,
6090         .atomic_check = dm_crtc_helper_atomic_check,
6091         .mode_fixup = dm_crtc_helper_mode_fixup,
6092         .get_scanout_position = amdgpu_crtc_get_scanout_position,
6093 };
6094
6095 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6096 {
6097
6098 }
6099
6100 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6101 {
6102         switch (display_color_depth) {
6103         case COLOR_DEPTH_666:
6104                 return 6;
6105         case COLOR_DEPTH_888:
6106                 return 8;
6107         case COLOR_DEPTH_101010:
6108                 return 10;
6109         case COLOR_DEPTH_121212:
6110                 return 12;
6111         case COLOR_DEPTH_141414:
6112                 return 14;
6113         case COLOR_DEPTH_161616:
6114                 return 16;
6115         default:
6116                 break;
6117         }
6118         return 0;
6119 }
6120
6121 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6122                                           struct drm_crtc_state *crtc_state,
6123                                           struct drm_connector_state *conn_state)
6124 {
6125         struct drm_atomic_state *state = crtc_state->state;
6126         struct drm_connector *connector = conn_state->connector;
6127         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6128         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6129         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6130         struct drm_dp_mst_topology_mgr *mst_mgr;
6131         struct drm_dp_mst_port *mst_port;
6132         enum dc_color_depth color_depth;
6133         int clock, bpp = 0;
6134         bool is_y420 = false;
6135
6136         if (!aconnector->port || !aconnector->dc_sink)
6137                 return 0;
6138
6139         mst_port = aconnector->port;
6140         mst_mgr = &aconnector->mst_port->mst_mgr;
6141
6142         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6143                 return 0;
6144
6145         if (!state->duplicated) {
6146                 int max_bpc = conn_state->max_requested_bpc;
6147                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6148                                 aconnector->force_yuv420_output;
6149                 color_depth = convert_color_depth_from_display_info(connector,
6150                                                                     is_y420,
6151                                                                     max_bpc);
6152                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6153                 clock = adjusted_mode->clock;
6154                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6155         }
6156         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6157                                                                            mst_mgr,
6158                                                                            mst_port,
6159                                                                            dm_new_connector_state->pbn,
6160                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
6161         if (dm_new_connector_state->vcpi_slots < 0) {
6162                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6163                 return dm_new_connector_state->vcpi_slots;
6164         }
6165         return 0;
6166 }
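/*
 * PBN (Payload Bandwidth Number) expresses MST bandwidth in units of
 * 54/64 MByte/s with a 0.6% margin folded in. As a rough worked
 * example (numbers illustrative), 1920x1080@60 with a 148.5 MHz pixel
 * clock at 24 bpp gives:
 *
 *	pbn = ceil(148500 * 24 * 64 * 1006 / (8 * 54 * 1000 * 1000))
 *	    = 532
 *
 * which drm_dp_atomic_find_vcpi_slots() then converts into timeslots
 * using the link-rate dependent divider from dm_mst_get_pbn_divider().
 */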
6167
6168 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6169         .disable = dm_encoder_helper_disable,
6170         .atomic_check = dm_encoder_helper_atomic_check
6171 };
6172
6173 #if defined(CONFIG_DRM_AMD_DC_DCN)
6174 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6175                                             struct dc_state *dc_state)
6176 {
6177         struct dc_stream_state *stream = NULL;
6178         struct drm_connector *connector;
6179         struct drm_connector_state *new_con_state, *old_con_state;
6180         struct amdgpu_dm_connector *aconnector;
6181         struct dm_connector_state *dm_conn_state;
6182         int i, j, clock, bpp;
6183         int vcpi, pbn_div, pbn = 0;
6184
6185         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6186
6187                 aconnector = to_amdgpu_dm_connector(connector);
6188
6189                 if (!aconnector->port)
6190                         continue;
6191
6192                 if (!new_con_state || !new_con_state->crtc)
6193                         continue;
6194
6195                 dm_conn_state = to_dm_connector_state(new_con_state);
6196
6197                 for (j = 0; j < dc_state->stream_count; j++) {
6198                         stream = dc_state->streams[j];
6199                         if (!stream)
6200                                 continue;
6201
6202                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6203                                 break;
6204
6205                         stream = NULL;
6206                 }
6207
6208                 if (!stream)
6209                         continue;
6210
6211                 if (stream->timing.flags.DSC != 1) {
6212                         drm_dp_mst_atomic_enable_dsc(state,
6213                                                      aconnector->port,
6214                                                      dm_conn_state->pbn,
6215                                                      0,
6216                                                      false);
6217                         continue;
6218                 }
6219
6220                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6221                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6222                 clock = stream->timing.pix_clk_100hz / 10;
6223                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6224                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6225                                                     aconnector->port,
6226                                                     pbn, pbn_div,
6227                                                     true);
6228                 if (vcpi < 0)
6229                         return vcpi;
6230
6231                 dm_conn_state->pbn = pbn;
6232                 dm_conn_state->vcpi_slots = vcpi;
6233         }
6234         return 0;
6235 }
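/*
 * Unit conventions in the DSC path above (as used by this driver):
 * stream->timing.pix_clk_100hz is in units of 100 Hz, so dividing by
 * 10 yields the kHz value drm_dp_calc_pbn_mode() expects, and
 * dsc_cfg.bits_per_pixel is in units of 1/16th of a bit, which the
 * dsc=true variant of drm_dp_calc_pbn_mode() accounts for internally.
 */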
6236 #endif
6237
6238 static void dm_drm_plane_reset(struct drm_plane *plane)
6239 {
6240         struct dm_plane_state *amdgpu_state = NULL;
6241
6242         if (plane->state)
6243                 plane->funcs->atomic_destroy_state(plane, plane->state);
6244
6245         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6246         WARN_ON(amdgpu_state == NULL);
6247
6248         if (amdgpu_state)
6249                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6250 }
6251
6252 static struct drm_plane_state *
6253 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6254 {
6255         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6256
6257         old_dm_plane_state = to_dm_plane_state(plane->state);
6258         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6259         if (!dm_plane_state)
6260                 return NULL;
6261
6262         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6263
6264         if (old_dm_plane_state->dc_state) {
6265                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6266                 dc_plane_state_retain(dm_plane_state->dc_state);
6267         }
6268
6269         return &dm_plane_state->base;
6270 }
6271
6272 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6273                                 struct drm_plane_state *state)
6274 {
6275         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6276
6277         if (dm_plane_state->dc_state)
6278                 dc_plane_state_release(dm_plane_state->dc_state);
6279
6280         drm_atomic_helper_plane_destroy_state(plane, state);
6281 }
6282
6283 static const struct drm_plane_funcs dm_plane_funcs = {
6284         .update_plane   = drm_atomic_helper_update_plane,
6285         .disable_plane  = drm_atomic_helper_disable_plane,
6286         .destroy        = drm_primary_helper_destroy,
6287         .reset = dm_drm_plane_reset,
6288         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6289         .atomic_destroy_state = dm_drm_plane_destroy_state,
6290         .format_mod_supported = dm_plane_format_mod_supported,
6291 };
6292
6293 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6294                                       struct drm_plane_state *new_state)
6295 {
6296         struct amdgpu_framebuffer *afb;
6297         struct drm_gem_object *obj;
6298         struct amdgpu_device *adev;
6299         struct amdgpu_bo *rbo;
6300         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6301         struct list_head list;
6302         struct ttm_validate_buffer tv;
6303         struct ww_acquire_ctx ticket;
6304         uint32_t domain;
6305         int r;
6306
6307         if (!new_state->fb) {
6308                 DRM_DEBUG_DRIVER("No FB bound\n");
6309                 return 0;
6310         }
6311
6312         afb = to_amdgpu_framebuffer(new_state->fb);
6313         obj = new_state->fb->obj[0];
6314         rbo = gem_to_amdgpu_bo(obj);
6315         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6316         INIT_LIST_HEAD(&list);
6317
6318         tv.bo = &rbo->tbo;
6319         tv.num_shared = 1;
6320         list_add(&tv.head, &list);
6321
6322         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6323         if (r) {
6324                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6325                 return r;
6326         }
6327
6328         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6329                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6330         else
6331                 domain = AMDGPU_GEM_DOMAIN_VRAM;
6332
6333         r = amdgpu_bo_pin(rbo, domain);
6334         if (unlikely(r != 0)) {
6335                 if (r != -ERESTARTSYS)
6336                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6337                 ttm_eu_backoff_reservation(&ticket, &list);
6338                 return r;
6339         }
6340
6341         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6342         if (unlikely(r != 0)) {
6343                 amdgpu_bo_unpin(rbo);
6344                 ttm_eu_backoff_reservation(&ticket, &list);
6345                 DRM_ERROR("%p bind failed\n", rbo);
6346                 return r;
6347         }
6348
6349         ttm_eu_backoff_reservation(&ticket, &list);
6350
6351         afb->address = amdgpu_bo_gpu_offset(rbo);
6352
6353         amdgpu_bo_ref(rbo);
6354
6355         /*
6356          * We don't do surface updates on planes that have been newly created,
6357          * but we also don't have the afb->address during atomic check.
6358          *
6359          * Fill in buffer attributes depending on the address here, but only on
6360          * newly created planes since they're not being used by DC yet and this
6361          * won't modify global state.
6362          */
6363         dm_plane_state_old = to_dm_plane_state(plane->state);
6364         dm_plane_state_new = to_dm_plane_state(new_state);
6365
6366         if (dm_plane_state_new->dc_state &&
6367             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6368                 struct dc_plane_state *plane_state =
6369                         dm_plane_state_new->dc_state;
6370                 bool force_disable_dcc = !plane_state->dcc.enable;
6371
6372                 fill_plane_buffer_attributes(
6373                         adev, afb, plane_state->format, plane_state->rotation,
6374                         afb->tiling_flags,
6375                         &plane_state->tiling_info, &plane_state->plane_size,
6376                         &plane_state->dcc, &plane_state->address,
6377                         afb->tmz_surface, force_disable_dcc);
6378         }
6379
6380         return 0;
6381 }
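/*
 * The reserve/pin/backoff sequence above is the standard TTM pattern:
 * a buffer object must be reserved (locked) before it may be pinned or
 * given a GART mapping, and the reservation is dropped again once the
 * pin holds the memory in place:
 *
 *	ttm_eu_reserve_buffers() -> amdgpu_bo_pin() ->
 *	amdgpu_ttm_alloc_gart() -> ttm_eu_backoff_reservation()
 */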
6382
6383 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6384                                        struct drm_plane_state *old_state)
6385 {
6386         struct amdgpu_bo *rbo;
6387         int r;
6388
6389         if (!old_state->fb)
6390                 return;
6391
6392         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6393         r = amdgpu_bo_reserve(rbo, false);
6394         if (unlikely(r)) {
6395                 DRM_ERROR("failed to reserve rbo before unpin\n");
6396                 return;
6397         }
6398
6399         amdgpu_bo_unpin(rbo);
6400         amdgpu_bo_unreserve(rbo);
6401         amdgpu_bo_unref(&rbo);
6402 }
6403
6404 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6405                                        struct drm_crtc_state *new_crtc_state)
6406 {
6407         struct drm_framebuffer *fb = state->fb;
6408         int min_downscale, max_upscale;
6409         int min_scale = 0;
6410         int max_scale = INT_MAX;
6411
6412         /* Plane enabled? Get min/max allowed scaling factors from plane caps. */
6413         if (fb && state->crtc) {
6414                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6415                                              &min_downscale, &max_upscale);
6416                 /*
6417                  * Convert to drm convention: 16.16 fixed point, instead of dc's
6418                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6419                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
6420                  */
6421                 min_scale = (1000 << 16) / max_upscale;
6422                 max_scale = (1000 << 16) / min_downscale;
6423         }
6424
6425         return drm_atomic_helper_check_plane_state(
6426                 state, new_crtc_state, min_scale, max_scale, true, true);
6427 }
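/*
 * Worked example of the fixed-point conversion above: a DC cap of
 * max_upscale == 16000 (16x, since 1.0 == 1000 in DC's convention)
 * becomes
 *
 *	min_scale = (1000 << 16) / 16000 = 4096
 *
 * i.e. 1/16th in drm's 16.16 src/dst ratio; the inversion reflects
 * drm using src/dst where DC uses dst/src.
 */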
6428
6429 static int dm_plane_atomic_check(struct drm_plane *plane,
6430                                  struct drm_plane_state *state)
6431 {
6432         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6433         struct dc *dc = adev->dm.dc;
6434         struct dm_plane_state *dm_plane_state;
6435         struct dc_scaling_info scaling_info;
6436         struct drm_crtc_state *new_crtc_state;
6437         int ret;
6438
6439         trace_amdgpu_dm_plane_atomic_check(state);
6440
6441         dm_plane_state = to_dm_plane_state(state);
6442
6443         if (!dm_plane_state->dc_state)
6444                 return 0;
6445
6446         new_crtc_state =
6447                 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6448         if (!new_crtc_state)
6449                 return -EINVAL;
6450
6451         ret = dm_plane_helper_check_state(state, new_crtc_state);
6452         if (ret)
6453                 return ret;
6454
6455         ret = fill_dc_scaling_info(state, &scaling_info);
6456         if (ret)
6457                 return ret;
6458
6459         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6460                 return 0;
6461
6462         return -EINVAL;
6463 }
6464
6465 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6466                                        struct drm_plane_state *new_plane_state)
6467 {
6468         /* Only support async updates on cursor planes. */
6469         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6470                 return -EINVAL;
6471
6472         return 0;
6473 }
6474
6475 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6476                                          struct drm_plane_state *new_state)
6477 {
6478         struct drm_plane_state *old_state =
6479                 drm_atomic_get_old_plane_state(new_state->state, plane);
6480
6481         trace_amdgpu_dm_atomic_update_cursor(new_state);
6482
6483         swap(plane->state->fb, new_state->fb);
6484
6485         plane->state->src_x = new_state->src_x;
6486         plane->state->src_y = new_state->src_y;
6487         plane->state->src_w = new_state->src_w;
6488         plane->state->src_h = new_state->src_h;
6489         plane->state->crtc_x = new_state->crtc_x;
6490         plane->state->crtc_y = new_state->crtc_y;
6491         plane->state->crtc_w = new_state->crtc_w;
6492         plane->state->crtc_h = new_state->crtc_h;
6493
6494         handle_cursor_update(plane, old_state);
6495 }
6496
6497 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6498         .prepare_fb = dm_plane_helper_prepare_fb,
6499         .cleanup_fb = dm_plane_helper_cleanup_fb,
6500         .atomic_check = dm_plane_atomic_check,
6501         .atomic_async_check = dm_plane_atomic_async_check,
6502         .atomic_async_update = dm_plane_atomic_async_update
6503 };
6504
6505 /*
6506  * TODO: these are currently initialized to rgb formats only.
6507  * For future use cases we should either initialize them dynamically based on
6508  * plane capabilities, or initialize this array to all formats, so internal drm
6509  * check will succeed, and let DC implement proper check
6510  */
6511 static const uint32_t rgb_formats[] = {
6512         DRM_FORMAT_XRGB8888,
6513         DRM_FORMAT_ARGB8888,
6514         DRM_FORMAT_RGBA8888,
6515         DRM_FORMAT_XRGB2101010,
6516         DRM_FORMAT_XBGR2101010,
6517         DRM_FORMAT_ARGB2101010,
6518         DRM_FORMAT_ABGR2101010,
6519         DRM_FORMAT_XBGR8888,
6520         DRM_FORMAT_ABGR8888,
6521         DRM_FORMAT_RGB565,
6522 };
6523
6524 static const uint32_t overlay_formats[] = {
6525         DRM_FORMAT_XRGB8888,
6526         DRM_FORMAT_ARGB8888,
6527         DRM_FORMAT_RGBA8888,
6528         DRM_FORMAT_XBGR8888,
6529         DRM_FORMAT_ABGR8888,
6530         DRM_FORMAT_RGB565
6531 };
6532
6533 static const u32 cursor_formats[] = {
6534         DRM_FORMAT_ARGB8888
6535 };
6536
6537 static int get_plane_formats(const struct drm_plane *plane,
6538                              const struct dc_plane_cap *plane_cap,
6539                              uint32_t *formats, int max_formats)
6540 {
6541         int i, num_formats = 0;
6542
6543         /*
6544          * TODO: Query support for each group of formats directly from
6545          * DC plane caps. This will require adding more formats to the
6546          * caps list.
6547          */
6548
6549         switch (plane->type) {
6550         case DRM_PLANE_TYPE_PRIMARY:
6551                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6552                         if (num_formats >= max_formats)
6553                                 break;
6554
6555                         formats[num_formats++] = rgb_formats[i];
6556                 }
6557
6558                 if (plane_cap && plane_cap->pixel_format_support.nv12)
6559                         formats[num_formats++] = DRM_FORMAT_NV12;
6560                 if (plane_cap && plane_cap->pixel_format_support.p010)
6561                         formats[num_formats++] = DRM_FORMAT_P010;
6562                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6563                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6564                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6565                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6566                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6567                 }
6568                 break;
6569
6570         case DRM_PLANE_TYPE_OVERLAY:
6571                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6572                         if (num_formats >= max_formats)
6573                                 break;
6574
6575                         formats[num_formats++] = overlay_formats[i];
6576                 }
6577                 break;
6578
6579         case DRM_PLANE_TYPE_CURSOR:
6580                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6581                         if (num_formats >= max_formats)
6582                                 break;
6583
6584                         formats[num_formats++] = cursor_formats[i];
6585                 }
6586                 break;
6587         }
6588
6589         return num_formats;
6590 }
6591
6592 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6593                                 struct drm_plane *plane,
6594                                 unsigned long possible_crtcs,
6595                                 const struct dc_plane_cap *plane_cap)
6596 {
6597         uint32_t formats[32];
6598         int num_formats;
6599         int res = -EPERM;
6600         unsigned int supported_rotations;
6601         uint64_t *modifiers = NULL;
6602
6603         num_formats = get_plane_formats(plane, plane_cap, formats,
6604                                         ARRAY_SIZE(formats));
6605
6606         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6607         if (res)
6608                 return res;
6609
6610         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6611                                        &dm_plane_funcs, formats, num_formats,
6612                                        modifiers, plane->type, NULL);
6613         kfree(modifiers);
6614         if (res)
6615                 return res;
6616
6617         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6618             plane_cap && plane_cap->per_pixel_alpha) {
6619                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6620                                           BIT(DRM_MODE_BLEND_PREMULTI);
6621
6622                 drm_plane_create_alpha_property(plane);
6623                 drm_plane_create_blend_mode_property(plane, blend_caps);
6624         }
6625
6626         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6627             plane_cap &&
6628             (plane_cap->pixel_format_support.nv12 ||
6629              plane_cap->pixel_format_support.p010)) {
6630                 /* This only affects YUV formats. */
6631                 drm_plane_create_color_properties(
6632                         plane,
6633                         BIT(DRM_COLOR_YCBCR_BT601) |
6634                         BIT(DRM_COLOR_YCBCR_BT709) |
6635                         BIT(DRM_COLOR_YCBCR_BT2020),
6636                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6637                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6638                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6639         }
6640
6641         supported_rotations =
6642                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6643                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6644
6645         if (dm->adev->asic_type >= CHIP_BONAIRE &&
6646             plane->type != DRM_PLANE_TYPE_CURSOR)
6647                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6648                                                    supported_rotations);
6649
6650         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6651
6652         /* Create (reset) the plane state */
6653         if (plane->funcs->reset)
6654                 plane->funcs->reset(plane);
6655
6656         return 0;
6657 }
6658
6659 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6660                                struct drm_plane *plane,
6661                                uint32_t crtc_index)
6662 {
6663         struct amdgpu_crtc *acrtc = NULL;
6664         struct drm_plane *cursor_plane;
6665
6666         int res = -ENOMEM;
6667
6668         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6669         if (!cursor_plane)
6670                 goto fail;
6671
6672         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6673         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6674         if (res)
6675                 goto fail;
6676
6675         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6676         if (!acrtc)
6677                 goto fail;
6678
6679         res = drm_crtc_init_with_planes(
6680                         dm->ddev,
6681                         &acrtc->base,
6682                         plane,
6683                         cursor_plane,
6684                         &amdgpu_dm_crtc_funcs, NULL);
6685
6686         if (res)
6687                 goto fail;
6688
6689         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6690
6691         /* Create (reset) the CRTC state */
6692         if (acrtc->base.funcs->reset)
6693                 acrtc->base.funcs->reset(&acrtc->base);
6694
6695         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6696         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6697
6698         acrtc->crtc_id = crtc_index;
6699         acrtc->base.enabled = false;
6700         acrtc->otg_inst = -1;
6701
6702         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6703         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6704                                    true, MAX_COLOR_LUT_ENTRIES);
6705         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6706
6707         return 0;
6708
6709 fail:
6710         kfree(acrtc);
6711         kfree(cursor_plane);
6712         return res;
6713 }
6714
6715
6716 static int to_drm_connector_type(enum signal_type st)
6717 {
6718         switch (st) {
6719         case SIGNAL_TYPE_HDMI_TYPE_A:
6720                 return DRM_MODE_CONNECTOR_HDMIA;
6721         case SIGNAL_TYPE_EDP:
6722                 return DRM_MODE_CONNECTOR_eDP;
6723         case SIGNAL_TYPE_LVDS:
6724                 return DRM_MODE_CONNECTOR_LVDS;
6725         case SIGNAL_TYPE_RGB:
6726                 return DRM_MODE_CONNECTOR_VGA;
6727         case SIGNAL_TYPE_DISPLAY_PORT:
6728         case SIGNAL_TYPE_DISPLAY_PORT_MST:
6729                 return DRM_MODE_CONNECTOR_DisplayPort;
6730         case SIGNAL_TYPE_DVI_DUAL_LINK:
6731         case SIGNAL_TYPE_DVI_SINGLE_LINK:
6732                 return DRM_MODE_CONNECTOR_DVID;
6733         case SIGNAL_TYPE_VIRTUAL:
6734                 return DRM_MODE_CONNECTOR_VIRTUAL;
6735
6736         default:
6737                 return DRM_MODE_CONNECTOR_Unknown;
6738         }
6739 }
6740
6741 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6742 {
6743         struct drm_encoder *encoder;
6744
6745         /* There is only one encoder per connector */
6746         drm_connector_for_each_possible_encoder(connector, encoder)
6747                 return encoder;
6748
6749         return NULL;
6750 }
6751
6752 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6753 {
6754         struct drm_encoder *encoder;
6755         struct amdgpu_encoder *amdgpu_encoder;
6756
6757         encoder = amdgpu_dm_connector_to_encoder(connector);
6758
6759         if (encoder == NULL)
6760                 return;
6761
6762         amdgpu_encoder = to_amdgpu_encoder(encoder);
6763
6764         amdgpu_encoder->native_mode.clock = 0;
6765
6766         if (!list_empty(&connector->probed_modes)) {
6767                 struct drm_display_mode *preferred_mode = NULL;
6768
6769                 list_for_each_entry(preferred_mode,
6770                                     &connector->probed_modes,
6771                                     head) {
6772                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6773                                 amdgpu_encoder->native_mode = *preferred_mode;
6774
6775                         break;
6776                 }
6777
6778         }
6779 }
6780
6781 static struct drm_display_mode *
6782 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6783                              char *name,
6784                              int hdisplay, int vdisplay)
6785 {
6786         struct drm_device *dev = encoder->dev;
6787         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6788         struct drm_display_mode *mode = NULL;
6789         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6790
6791         mode = drm_mode_duplicate(dev, native_mode);
6792
6793         if (mode == NULL)
6794                 return NULL;
6795
6796         mode->hdisplay = hdisplay;
6797         mode->vdisplay = vdisplay;
6798         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6799         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6800
6801         return mode;
6802
6803 }
6804
6805 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6806                                                  struct drm_connector *connector)
6807 {
6808         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6809         struct drm_display_mode *mode = NULL;
6810         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6811         struct amdgpu_dm_connector *amdgpu_dm_connector =
6812                                 to_amdgpu_dm_connector(connector);
6813         int i;
6814         int n;
6815         struct mode_size {
6816                 char name[DRM_DISPLAY_MODE_LEN];
6817                 int w;
6818                 int h;
6819         } common_modes[] = {
6820                 {  "640x480",  640,  480},
6821                 {  "800x600",  800,  600},
6822                 { "1024x768", 1024,  768},
6823                 { "1280x720", 1280,  720},
6824                 { "1280x800", 1280,  800},
6825                 {"1280x1024", 1280, 1024},
6826                 { "1440x900", 1440,  900},
6827                 {"1680x1050", 1680, 1050},
6828                 {"1600x1200", 1600, 1200},
6829                 {"1920x1080", 1920, 1080},
6830                 {"1920x1200", 1920, 1200}
6831         };
6832
6833         n = ARRAY_SIZE(common_modes);
6834
6835         for (i = 0; i < n; i++) {
6836                 struct drm_display_mode *curmode = NULL;
6837                 bool mode_existed = false;
6838
6839                 if (common_modes[i].w > native_mode->hdisplay ||
6840                     common_modes[i].h > native_mode->vdisplay ||
6841                    (common_modes[i].w == native_mode->hdisplay &&
6842                     common_modes[i].h == native_mode->vdisplay))
6843                         continue;
6844
6845                 list_for_each_entry(curmode, &connector->probed_modes, head) {
6846                         if (common_modes[i].w == curmode->hdisplay &&
6847                             common_modes[i].h == curmode->vdisplay) {
6848                                 mode_existed = true;
6849                                 break;
6850                         }
6851                 }
6852
6853                 if (mode_existed)
6854                         continue;
6855
6856                 mode = amdgpu_dm_create_common_mode(encoder,
6857                                 common_modes[i].name, common_modes[i].w,
6858                                 common_modes[i].h);
                     if (!mode)
                             continue;
6859                 drm_mode_probed_add(connector, mode);
6860                 amdgpu_dm_connector->num_modes++;
6861         }
6862 }
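
/*
 * Worked example for the filter above (illustrative, not driver code): with a
 * 1920x1080 native mode the loop adds every smaller common_modes[] entry that
 * EDID probing did not already supply (e.g. 1280x720 or 1680x1050), skips
 * 1920x1080 itself (equal to the native mode) and skips 1920x1200 (taller
 * than the native mode).
 */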
6863
6864 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6865                                               struct edid *edid)
6866 {
6867         struct amdgpu_dm_connector *amdgpu_dm_connector =
6868                         to_amdgpu_dm_connector(connector);
6869
6870         if (edid) {
6871                 /* empty probed_modes */
6872                 INIT_LIST_HEAD(&connector->probed_modes);
6873                 amdgpu_dm_connector->num_modes =
6874                                 drm_add_edid_modes(connector, edid);
6875
6876                 /* Sort the probed modes before calling
6877                  * amdgpu_dm_get_native_mode(), since an EDID can have
6878                  * more than one preferred mode. Modes later in the
6879                  * probed-mode list may be of a higher, preferred
6880                  * resolution - for example, a 3840x2160 preferred timing
6881                  * in the base EDID and a 4096x2160 preferred resolution
6882                  * in a later DID extension block.
6883                  */
6884                 drm_mode_sort(&connector->probed_modes);
6885                 amdgpu_dm_get_native_mode(connector);
6886         } else {
6887                 amdgpu_dm_connector->num_modes = 0;
6888         }
6889 }
6890
6891 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6892 {
6893         struct amdgpu_dm_connector *amdgpu_dm_connector =
6894                         to_amdgpu_dm_connector(connector);
6895         struct drm_encoder *encoder;
6896         struct edid *edid = amdgpu_dm_connector->edid;
6897
6898         encoder = amdgpu_dm_connector_to_encoder(connector);
6899
6900         if (!drm_edid_is_valid(edid)) {
6901                 amdgpu_dm_connector->num_modes =
6902                                 drm_add_modes_noedid(connector, 640, 480);
6903         } else {
6904                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6905                 amdgpu_dm_connector_add_common_modes(encoder, connector);
6906         }
6907         amdgpu_dm_fbc_init(connector);
6908
6909         return amdgpu_dm_connector->num_modes;
6910 }
6911
6912 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6913                                      struct amdgpu_dm_connector *aconnector,
6914                                      int connector_type,
6915                                      struct dc_link *link,
6916                                      int link_index)
6917 {
6918         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6919
6920         /*
6921          * Some of the properties below require access to state, like bpc.
6922          * Allocate some default initial connector state with our reset helper.
6923          */
6924         if (aconnector->base.funcs->reset)
6925                 aconnector->base.funcs->reset(&aconnector->base);
6926
6927         aconnector->connector_id = link_index;
6928         aconnector->dc_link = link;
6929         aconnector->base.interlace_allowed = false;
6930         aconnector->base.doublescan_allowed = false;
6931         aconnector->base.stereo_allowed = false;
6932         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6933         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6934         aconnector->audio_inst = -1;
6935         mutex_init(&aconnector->hpd_lock);
6936
6937         /*
6938          * Configure HPD hot plug support: connector->polled defaults to 0,
6939          * which means HPD hot plug is not supported.
6940          */
6941         switch (connector_type) {
6942         case DRM_MODE_CONNECTOR_HDMIA:
6943                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6944                 aconnector->base.ycbcr_420_allowed =
6945                         link->link_enc->features.hdmi_ycbcr420_supported;
6946                 break;
6947         case DRM_MODE_CONNECTOR_DisplayPort:
6948                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6949                 aconnector->base.ycbcr_420_allowed =
6950                         link->link_enc->features.dp_ycbcr420_supported;
6951                 break;
6952         case DRM_MODE_CONNECTOR_DVID:
6953                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6954                 break;
6955         default:
6956                 break;
6957         }
6958
6959         drm_object_attach_property(&aconnector->base.base,
6960                                 dm->ddev->mode_config.scaling_mode_property,
6961                                 DRM_MODE_SCALE_NONE);
6962
6963         drm_object_attach_property(&aconnector->base.base,
6964                                 adev->mode_info.underscan_property,
6965                                 UNDERSCAN_OFF);
6966         drm_object_attach_property(&aconnector->base.base,
6967                                 adev->mode_info.underscan_hborder_property,
6968                                 0);
6969         drm_object_attach_property(&aconnector->base.base,
6970                                 adev->mode_info.underscan_vborder_property,
6971                                 0);
6972
6973         if (!aconnector->mst_port)
6974                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6975
6976         /* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
6977         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6978         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6979
6980         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6981             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6982                 drm_object_attach_property(&aconnector->base.base,
6983                                 adev->mode_info.abm_level_property, 0);
6984         }
6985
6986         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6987             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6988             connector_type == DRM_MODE_CONNECTOR_eDP) {
6989                 drm_object_attach_property(
6990                         &aconnector->base.base,
6991                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
6992
6993                 if (!aconnector->mst_port)
6994                         drm_connector_attach_vrr_capable_property(&aconnector->base);
6995
6996 #ifdef CONFIG_DRM_AMD_DC_HDCP
6997                 if (adev->dm.hdcp_workqueue)
6998                         drm_connector_attach_content_protection_property(&aconnector->base, true);
6999 #endif
7000         }
7001 }
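
/*
 * Userspace-side sketch (illustrative, assumes libdrm): the "max bpc" property
 * attached above is what compositors tweak to cap the link bpc. Assuming
 * prop_id was found by walking the connector's properties with
 * drmModeObjectGetProperties(), a client could request 8 bpc with:
 *
 *      drmModeObjectSetProperty(fd, connector_id,
 *                               DRM_MODE_OBJECT_CONNECTOR, prop_id, 8);
 */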
7002
7003 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7004                               struct i2c_msg *msgs, int num)
7005 {
7006         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7007         struct ddc_service *ddc_service = i2c->ddc_service;
7008         struct i2c_command cmd;
7009         int i;
7010         int result = -EIO;
7011
7012         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7013
7014         if (!cmd.payloads)
7015                 return result;
7016
7017         cmd.number_of_payloads = num;
7018         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7019         cmd.speed = 100;
7020
7021         for (i = 0; i < num; i++) {
7022                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7023                 cmd.payloads[i].address = msgs[i].addr;
7024                 cmd.payloads[i].length = msgs[i].len;
7025                 cmd.payloads[i].data = msgs[i].buf;
7026         }
7027
7028         if (dc_submit_i2c(
7029                         ddc_service->ctx->dc,
7030                         ddc_service->ddc_pin->hw_info.ddc_channel,
7031                         &cmd))
7032                 result = num;
7033
7034         kfree(cmd.payloads);
7035         return result;
7036 }
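
/*
 * Illustrative sketch: a typical EDID block read reaches amdgpu_dm_i2c_xfer()
 * as two i2c_msg entries - a 1-byte offset write followed by a 128-byte read
 * from DDC_ADDR (0x50):
 *
 *      u8 offset = 0;
 *      u8 edid[EDID_LENGTH];
 *      struct i2c_msg msgs[] = {
 *              { .addr = DDC_ADDR, .flags = 0, .len = 1, .buf = &offset },
 *              { .addr = DDC_ADDR, .flags = I2C_M_RD,
 *                .len = sizeof(edid), .buf = edid },
 *      };
 *
 * Each message becomes one i2c_payload (->write derived from I2C_M_RD), and
 * dc_submit_i2c() drives the whole transaction on the link's DDC channel.
 */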
7037
7038 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7039 {
7040         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7041 }
7042
7043 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7044         .master_xfer = amdgpu_dm_i2c_xfer,
7045         .functionality = amdgpu_dm_i2c_func,
7046 };
7047
7048 static struct amdgpu_i2c_adapter *
7049 create_i2c(struct ddc_service *ddc_service,
7050            int link_index,
7051            int *res)
7052 {
7053         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7054         struct amdgpu_i2c_adapter *i2c;
7055
7056         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7057         if (!i2c)
7058                 return NULL;
7059         i2c->base.owner = THIS_MODULE;
7060         i2c->base.class = I2C_CLASS_DDC;
7061         i2c->base.dev.parent = &adev->pdev->dev;
7062         i2c->base.algo = &amdgpu_dm_i2c_algo;
7063         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7064         i2c_set_adapdata(&i2c->base, i2c);
7065         i2c->ddc_service = ddc_service;
7066         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7067
7068         return i2c;
7069 }
7070
7072 /*
7073  * Note: this function assumes that dc_link_detect() was called for the
7074  * dc_link which will be represented by this aconnector.
7075  */
7076 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7077                                     struct amdgpu_dm_connector *aconnector,
7078                                     uint32_t link_index,
7079                                     struct amdgpu_encoder *aencoder)
7080 {
7081         int res = 0;
7082         int connector_type;
7083         struct dc *dc = dm->dc;
7084         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7085         struct amdgpu_i2c_adapter *i2c;
7086
7087         link->priv = aconnector;
7088
7089         DRM_DEBUG_DRIVER("%s()\n", __func__);
7090
7091         i2c = create_i2c(link->ddc, link->link_index, &res);
7092         if (!i2c) {
7093                 DRM_ERROR("Failed to create i2c adapter data\n");
7094                 return -ENOMEM;
7095         }
7096
7097         aconnector->i2c = i2c;
7098         res = i2c_add_adapter(&i2c->base);
7099
7100         if (res) {
7101                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7102                 goto out_free;
7103         }
7104
7105         connector_type = to_drm_connector_type(link->connector_signal);
7106
7107         res = drm_connector_init_with_ddc(
7108                         dm->ddev,
7109                         &aconnector->base,
7110                         &amdgpu_dm_connector_funcs,
7111                         connector_type,
7112                         &i2c->base);
7113
7114         if (res) {
7115                 DRM_ERROR("connector_init failed\n");
7116                 aconnector->connector_id = -1;
7117                 goto out_free;
7118         }
7119
7120         drm_connector_helper_add(
7121                         &aconnector->base,
7122                         &amdgpu_dm_connector_helper_funcs);
7123
7124         amdgpu_dm_connector_init_helper(
7125                 dm,
7126                 aconnector,
7127                 connector_type,
7128                 link,
7129                 link_index);
7130
7131         drm_connector_attach_encoder(
7132                 &aconnector->base, &aencoder->base);
7133
7134         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7135                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7136                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7137
7138 out_free:
7139         if (res) {
7140                 kfree(i2c);
7141                 aconnector->i2c = NULL;
7142         }
7143         return res;
7144 }
7145
7146 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7147 {
7148         switch (adev->mode_info.num_crtc) {
7149         case 1:
7150                 return 0x1;
7151         case 2:
7152                 return 0x3;
7153         case 3:
7154                 return 0x7;
7155         case 4:
7156                 return 0xf;
7157         case 5:
7158                 return 0x1f;
7159         case 6:
7160         default:
7161                 return 0x3f;
7162         }
7163 }
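
/*
 * Equivalent formulation (illustrative sketch, not used by the driver): for
 * num_crtc in 1..6 the switch above is simply a full bitmask over
 * min(num_crtc, 6) bits; anything outside that range falls back to 0x3f.
 */
static inline int amdgpu_dm_encoder_crtc_mask_sketch(int num_crtc)
{
        /* Clamp to the 6 CRTCs the mask covers, then set that many low bits. */
        return (1 << min(num_crtc, 6)) - 1;
}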
7164
7165 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7166                                   struct amdgpu_encoder *aencoder,
7167                                   uint32_t link_index)
7168 {
7169         struct amdgpu_device *adev = drm_to_adev(dev);
7170
7171         int res = drm_encoder_init(dev,
7172                                    &aencoder->base,
7173                                    &amdgpu_dm_encoder_funcs,
7174                                    DRM_MODE_ENCODER_TMDS,
7175                                    NULL);
7176
7177         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7178
7179         if (!res)
7180                 aencoder->encoder_id = link_index;
7181         else
7182                 aencoder->encoder_id = -1;
7183
7184         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7185
7186         return res;
7187 }
7188
7189 static void manage_dm_interrupts(struct amdgpu_device *adev,
7190                                  struct amdgpu_crtc *acrtc,
7191                                  bool enable)
7192 {
7193         /*
7194          * We have no guarantee that the frontend index maps to the same
7195          * backend index - some even map to more than one.
7196          *
7197          * TODO: Use a different interrupt or check DC itself for the mapping.
7198          */
7199         int irq_type =
7200                 amdgpu_display_crtc_idx_to_irq_type(
7201                         adev,
7202                         acrtc->crtc_id);
7203
7204         if (enable) {
7205                 drm_crtc_vblank_on(&acrtc->base);
7206                 amdgpu_irq_get(
7207                         adev,
7208                         &adev->pageflip_irq,
7209                         irq_type);
7210         } else {
7212                 amdgpu_irq_put(
7213                         adev,
7214                         &adev->pageflip_irq,
7215                         irq_type);
7216                 drm_crtc_vblank_off(&acrtc->base);
7217         }
7218 }
7219
7220 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7221                                       struct amdgpu_crtc *acrtc)
7222 {
7223         int irq_type =
7224                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7225
7226         /**
7227          * This reads the current state for the IRQ and force-reapplies
7228          * the setting to hardware.
7229          */
7230         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7231 }
7232
7233 static bool
7234 is_scaling_state_different(const struct dm_connector_state *dm_state,
7235                            const struct dm_connector_state *old_dm_state)
7236 {
7237         if (dm_state->scaling != old_dm_state->scaling)
7238                 return true;
7239         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7240                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7241                         return true;
7242         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7243                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7244                         return true;
7245         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7246                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7247                 return true;
7248         return false;
7249 }
7250
7251 #ifdef CONFIG_DRM_AMD_DC_HDCP
7252 static bool is_content_protection_different(struct drm_connector_state *state,
7253                                             const struct drm_connector_state *old_state,
7254                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7255 {
7256         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7257         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7258
7259         /* Handle: Type0/1 change */
7260         if (old_state->hdcp_content_type != state->hdcp_content_type &&
7261             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7262                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7263                 return true;
7264         }
7265
7266         /* CP is being re-enabled, ignore this.
7267          *
7268          * Handles:     ENABLED -> DESIRED
7269          */
7270         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7271             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7272                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7273                 return false;
7274         }
7275
7276         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
7277          *
7278          * Handles:     UNDESIRED -> ENABLED
7279          */
7280         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7281             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7282                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7283
7284         /* Check that something is connected and enabled; otherwise we would
7285          * start HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
7286          *
7287          * Handles:     DESIRED -> DESIRED (Special case)
7288          */
7289         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7290             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7291                 dm_con_state->update_hdcp = false;
7292                 return true;
7293         }
7294
7295         /*
7296          * Handles:     UNDESIRED -> UNDESIRED
7297          *              DESIRED -> DESIRED
7298          *              ENABLED -> ENABLED
7299          */
7300         if (old_state->content_protection == state->content_protection)
7301                 return false;
7302
7303         /*
7304          * Handles:     UNDESIRED -> DESIRED
7305          *              DESIRED -> UNDESIRED
7306          *              ENABLED -> UNDESIRED
7307          */
7308         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7309                 return true;
7310
7311         /*
7312          * Handles:     DESIRED -> ENABLED
7313          */
7314         return false;
7315 }
7316
7317 #endif
7318 static void remove_stream(struct amdgpu_device *adev,
7319                           struct amdgpu_crtc *acrtc,
7320                           struct dc_stream_state *stream)
7321 {
7322         /* this is the update mode case */
7323
7324         acrtc->otg_inst = -1;
7325         acrtc->enabled = false;
7326 }
7327
7328 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7329                                struct dc_cursor_position *position)
7330 {
7331         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7332         int x, y;
7333         int xorigin = 0, yorigin = 0;
7334
7335         position->enable = false;
7336         position->x = 0;
7337         position->y = 0;
7338
7339         if (!crtc || !plane->state->fb)
7340                 return 0;
7341
7342         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7343             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7344                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7345                           __func__,
7346                           plane->state->crtc_w,
7347                           plane->state->crtc_h);
7348                 return -EINVAL;
7349         }
7350
7351         x = plane->state->crtc_x;
7352         y = plane->state->crtc_y;
7353
7354         if (x <= -amdgpu_crtc->max_cursor_width ||
7355             y <= -amdgpu_crtc->max_cursor_height)
7356                 return 0;
7357
7358         if (x < 0) {
7359                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7360                 x = 0;
7361         }
7362         if (y < 0) {
7363                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7364                 y = 0;
7365         }
7366         position->enable = true;
7367         position->translate_by_source = true;
7368         position->x = x;
7369         position->y = y;
7370         position->x_hotspot = xorigin;
7371         position->y_hotspot = yorigin;
7372
7373         return 0;
7374 }
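
/*
 * Worked example for the clamping above (illustrative): a 64x64 cursor at
 * crtc_x = -10, crtc_y = 20 yields x = 0, x_hotspot = 10, y = 20 and
 * y_hotspot = 0 - i.e. the cursor is scanned out with its first 10 columns
 * hanging off the left edge of the screen.
 */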
7375
7376 static void handle_cursor_update(struct drm_plane *plane,
7377                                  struct drm_plane_state *old_plane_state)
7378 {
7379         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7380         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7381         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7382         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7383         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7384         uint64_t address = afb ? afb->address : 0;
7385         struct dc_cursor_position position;
7386         struct dc_cursor_attributes attributes;
7387         int ret;
7388
7389         if (!plane->state->fb && !old_plane_state->fb)
7390                 return;
7391
7392         DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d x %d\n",
7393                          __func__,
7394                          amdgpu_crtc->crtc_id,
7395                          plane->state->crtc_w,
7396                          plane->state->crtc_h);
7397
7398         ret = get_cursor_position(plane, crtc, &position);
7399         if (ret)
7400                 return;
7401
7402         if (!position.enable) {
7403                 /* turn off cursor */
7404                 if (crtc_state && crtc_state->stream) {
7405                         mutex_lock(&adev->dm.dc_lock);
7406                         dc_stream_set_cursor_position(crtc_state->stream,
7407                                                       &position);
7408                         mutex_unlock(&adev->dm.dc_lock);
7409                 }
7410                 return;
7411         }
7412
7413         amdgpu_crtc->cursor_width = plane->state->crtc_w;
7414         amdgpu_crtc->cursor_height = plane->state->crtc_h;
7415
7416         memset(&attributes, 0, sizeof(attributes));
7417         attributes.address.high_part = upper_32_bits(address);
7418         attributes.address.low_part  = lower_32_bits(address);
7419         attributes.width             = plane->state->crtc_w;
7420         attributes.height            = plane->state->crtc_h;
7421         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7422         attributes.rotation_angle    = 0;
7423         attributes.attribute_flags.value = 0;
7424
7425         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7426
7427         if (crtc_state->stream) {
7428                 mutex_lock(&adev->dm.dc_lock);
7429                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7430                                                          &attributes))
7431                         DRM_ERROR("DC failed to set cursor attributes\n");
7432
7433                 if (!dc_stream_set_cursor_position(crtc_state->stream,
7434                                                    &position))
7435                         DRM_ERROR("DC failed to set cursor position\n");
7436                 mutex_unlock(&adev->dm.dc_lock);
7437         }
7438 }
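
/*
 * Illustrative note: for a 64x64 ARGB8888 cursor FB, pitches[0] is 256 bytes
 * and cpp[0] is 4 bytes per pixel, so the attributes.pitch computed above
 * works out to 64 pixels.
 */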
7439
7440 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7441 {
7443         assert_spin_locked(&acrtc->base.dev->event_lock);
7444         WARN_ON(acrtc->event);
7445
7446         acrtc->event = acrtc->base.state->event;
7447
7448         /* Set the flip status */
7449         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7450
7451         /* Mark this event as consumed */
7452         acrtc->base.state->event = NULL;
7453
7454         DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7455                                                  acrtc->crtc_id);
7456 }
7457
7458 static void update_freesync_state_on_stream(
7459         struct amdgpu_display_manager *dm,
7460         struct dm_crtc_state *new_crtc_state,
7461         struct dc_stream_state *new_stream,
7462         struct dc_plane_state *surface,
7463         u32 flip_timestamp_in_us)
7464 {
7465         struct mod_vrr_params vrr_params;
7466         struct dc_info_packet vrr_infopacket = {0};
7467         struct amdgpu_device *adev = dm->adev;
7468         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7469         unsigned long flags;
7470
7471         if (!new_stream)
7472                 return;
7473
7474         /*
7475          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7476          * For now it's sufficient to just guard against these conditions.
7477          */
7478
7479         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7480                 return;
7481
7482         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7483         vrr_params = acrtc->dm_irq_params.vrr_params;
7484
7485         if (surface) {
7486                 mod_freesync_handle_preflip(
7487                         dm->freesync_module,
7488                         surface,
7489                         new_stream,
7490                         flip_timestamp_in_us,
7491                         &vrr_params);
7492
7493                 if (adev->family < AMDGPU_FAMILY_AI &&
7494                     amdgpu_dm_vrr_active(new_crtc_state)) {
7495                         mod_freesync_handle_v_update(dm->freesync_module,
7496                                                      new_stream, &vrr_params);
7497
7498                         /* Need to call this before the frame ends. */
7499                         dc_stream_adjust_vmin_vmax(dm->dc,
7500                                                    new_crtc_state->stream,
7501                                                    &vrr_params.adjust);
7502                 }
7503         }
7504
7505         mod_freesync_build_vrr_infopacket(
7506                 dm->freesync_module,
7507                 new_stream,
7508                 &vrr_params,
7509                 PACKET_TYPE_VRR,
7510                 TRANSFER_FUNC_UNKNOWN,
7511                 &vrr_infopacket);
7512
7513         new_crtc_state->freesync_timing_changed |=
7514                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7515                         &vrr_params.adjust,
7516                         sizeof(vrr_params.adjust)) != 0);
7517
7518         new_crtc_state->freesync_vrr_info_changed |=
7519                 (memcmp(&new_crtc_state->vrr_infopacket,
7520                         &vrr_infopacket,
7521                         sizeof(vrr_infopacket)) != 0);
7522
7523         acrtc->dm_irq_params.vrr_params = vrr_params;
7524         new_crtc_state->vrr_infopacket = vrr_infopacket;
7525
7526         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7527         new_stream->vrr_infopacket = vrr_infopacket;
7528
7529         if (new_crtc_state->freesync_vrr_info_changed)
7530                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
7531                               new_crtc_state->base.crtc->base.id,
7532                               (int)new_crtc_state->base.vrr_enabled,
7533                               (int)vrr_params.state);
7534
7535         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7536 }
7537
7538 static void update_stream_irq_parameters(
7539         struct amdgpu_display_manager *dm,
7540         struct dm_crtc_state *new_crtc_state)
7541 {
7542         struct dc_stream_state *new_stream = new_crtc_state->stream;
7543         struct mod_vrr_params vrr_params;
7544         struct mod_freesync_config config = new_crtc_state->freesync_config;
7545         struct amdgpu_device *adev = dm->adev;
7546         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7547         unsigned long flags;
7548
7549         if (!new_stream)
7550                 return;
7551
7552         /*
7553          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7554          * For now it's sufficient to just guard against these conditions.
7555          */
7556         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7557                 return;
7558
7559         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7560         vrr_params = acrtc->dm_irq_params.vrr_params;
7561
7562         if (new_crtc_state->vrr_supported &&
7563             config.min_refresh_in_uhz &&
7564             config.max_refresh_in_uhz) {
7565                 config.state = new_crtc_state->base.vrr_enabled ?
7566                         VRR_STATE_ACTIVE_VARIABLE :
7567                         VRR_STATE_INACTIVE;
7568         } else {
7569                 config.state = VRR_STATE_UNSUPPORTED;
7570         }
7571
7572         mod_freesync_build_vrr_params(dm->freesync_module,
7573                                       new_stream,
7574                                       &config, &vrr_params);
7575
7576         new_crtc_state->freesync_timing_changed |=
7577                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7578                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7579
7580         new_crtc_state->freesync_config = config;
7581         /* Copy state for access from DM IRQ handler */
7582         acrtc->dm_irq_params.freesync_config = config;
7583         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7584         acrtc->dm_irq_params.vrr_params = vrr_params;
7585         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7586 }
7587
7588 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7589                                             struct dm_crtc_state *new_state)
7590 {
7591         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7592         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7593
7594         if (!old_vrr_active && new_vrr_active) {
7595                 /* Transition VRR inactive -> active:
7596                  * While VRR is active, we must not disable vblank irq, as a
7597                  * reenable after disable would compute bogus vblank/pflip
7598                  * timestamps if it likely happened inside display front-porch.
7599                  *
7600                  * We also need vupdate irq for the actual core vblank handling
7601                  * at end of vblank.
7602                  */
7603                 dm_set_vupdate_irq(new_state->base.crtc, true);
7604                 drm_crtc_vblank_get(new_state->base.crtc);
7605                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7606                                  __func__, new_state->base.crtc->base.id);
7607         } else if (old_vrr_active && !new_vrr_active) {
7608                 /* Transition VRR active -> inactive:
7609                  * Allow vblank irq disable again for fixed refresh rate.
7610                  */
7611                 dm_set_vupdate_irq(new_state->base.crtc, false);
7612                 drm_crtc_vblank_put(new_state->base.crtc);
7613                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7614                                  __func__, new_state->base.crtc->base.id);
7615         }
7616 }
7617
7618 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7619 {
7620         struct drm_plane *plane;
7621         struct drm_plane_state *old_plane_state, *new_plane_state;
7622         int i;
7623
7624         /*
7625          * TODO: Make this per-stream so we don't issue redundant updates for
7626          * commits with multiple streams.
7627          */
7628         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7629                                        new_plane_state, i)
7630                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7631                         handle_cursor_update(plane, old_plane_state);
7632 }
7633
7634 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7635                                     struct dc_state *dc_state,
7636                                     struct drm_device *dev,
7637                                     struct amdgpu_display_manager *dm,
7638                                     struct drm_crtc *pcrtc,
7639                                     bool wait_for_vblank)
7640 {
7641         int i;
7642         uint64_t timestamp_ns;
7643         struct drm_plane *plane;
7644         struct drm_plane_state *old_plane_state, *new_plane_state;
7645         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7646         struct drm_crtc_state *new_pcrtc_state =
7647                         drm_atomic_get_new_crtc_state(state, pcrtc);
7648         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7649         struct dm_crtc_state *dm_old_crtc_state =
7650                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7651         int planes_count = 0, vpos, hpos;
7652         long r;
7653         unsigned long flags;
7654         struct amdgpu_bo *abo;
7655         uint32_t target_vblank, last_flip_vblank;
7656         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7657         bool pflip_present = false;
7658         struct {
7659                 struct dc_surface_update surface_updates[MAX_SURFACES];
7660                 struct dc_plane_info plane_infos[MAX_SURFACES];
7661                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7662                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7663                 struct dc_stream_update stream_update;
7664         } *bundle;
7665
7666         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7667
7668         if (!bundle) {
7669                 dm_error("Failed to allocate update bundle\n");
7670                 goto cleanup;
7671         }
7672
7673         /*
7674          * Disable the cursor first if we're disabling all the planes.
7675          * It'll remain on the screen after the planes are re-enabled
7676          * if we don't.
7677          */
7678         if (acrtc_state->active_planes == 0)
7679                 amdgpu_dm_commit_cursors(state);
7680
7681         /* update planes when needed */
7682         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
7683                 struct drm_crtc *crtc = new_plane_state->crtc;
7684                 struct drm_crtc_state *new_crtc_state;
7685                 struct drm_framebuffer *fb = new_plane_state->fb;
7686                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7687                 bool plane_needs_flip;
7688                 struct dc_plane_state *dc_plane;
7689                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7690
7691                 /* Cursor plane is handled after stream updates */
7692                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7693                         continue;
7694
7695                 if (!fb || !crtc || pcrtc != crtc)
7696                         continue;
7697
7698                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7699                 if (!new_crtc_state->active)
7700                         continue;
7701
7702                 dc_plane = dm_new_plane_state->dc_state;
7703
7704                 bundle->surface_updates[planes_count].surface = dc_plane;
7705                 if (new_pcrtc_state->color_mgmt_changed) {
7706                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7707                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7708                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7709                 }
7710
7711                 fill_dc_scaling_info(new_plane_state,
7712                                      &bundle->scaling_infos[planes_count]);
7713
7714                 bundle->surface_updates[planes_count].scaling_info =
7715                         &bundle->scaling_infos[planes_count];
7716
7717                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7718
7719                 pflip_present = pflip_present || plane_needs_flip;
7720
7721                 if (!plane_needs_flip) {
7722                         planes_count += 1;
7723                         continue;
7724                 }
7725
7726                 abo = gem_to_amdgpu_bo(fb->obj[0]);
7727
7728                 /*
7729                  * Wait for all fences on this FB. Do limited wait to avoid
7730                  * deadlock during GPU reset when this fence will not signal
7731                  * but we hold reservation lock for the BO.
7732                  */
7733                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7734                                                         false,
7735                                                         msecs_to_jiffies(5000));
7736                 if (unlikely(r <= 0))
7737                         DRM_ERROR("Waiting for fences timed out!\n");
7738
7739                 fill_dc_plane_info_and_addr(
7740                         dm->adev, new_plane_state,
7741                         afb->tiling_flags,
7742                         &bundle->plane_infos[planes_count],
7743                         &bundle->flip_addrs[planes_count].address,
7744                         afb->tmz_surface, false);
7745
7746                 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7747                                  new_plane_state->plane->index,
7748                                  bundle->plane_infos[planes_count].dcc.enable);
7749
7750                 bundle->surface_updates[planes_count].plane_info =
7751                         &bundle->plane_infos[planes_count];
7752
7753                 /*
7754                  * Only allow immediate flips for fast updates that don't
7755                  * change FB pitch, DCC state, rotation or mirroring.
7756                  */
7757                 bundle->flip_addrs[planes_count].flip_immediate =
7758                         crtc->state->async_flip &&
7759                         acrtc_state->update_type == UPDATE_TYPE_FAST;
7760
7761                 timestamp_ns = ktime_get_ns();
7762                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7763                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7764                 bundle->surface_updates[planes_count].surface = dc_plane;
7765
7766                 if (!bundle->surface_updates[planes_count].surface) {
7767                         DRM_ERROR("No surface for CRTC: id=%d\n",
7768                                         acrtc_attach->crtc_id);
7769                         continue;
7770                 }
7771
7772                 if (plane == pcrtc->primary)
7773                         update_freesync_state_on_stream(
7774                                 dm,
7775                                 acrtc_state,
7776                                 acrtc_state->stream,
7777                                 dc_plane,
7778                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7779
7780                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7781                                  __func__,
7782                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7783                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7784
7785                 planes_count += 1;
7786
7787         }
7788
7789         if (pflip_present) {
7790                 if (!vrr_active) {
7791                         /* Use old throttling in non-vrr fixed refresh rate mode
7792                          * to keep flip scheduling based on target vblank counts
7793                          * working in a backwards compatible way, e.g., for
7794                          * clients using the GLX_OML_sync_control extension or
7795                          * DRI3/Present extension with defined target_msc.
7796                          */
7797                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7798                 } else {
7800                         /* For variable refresh rate mode only:
7801                          * Get vblank of last completed flip to avoid > 1 vrr
7802                          * flips per video frame by use of throttling, but allow
7803                          * flip programming anywhere in the possibly large
7804                          * variable vrr vblank interval for fine-grained flip
7805                          * timing control and more opportunity to avoid stutter
7806                          * on late submission of flips.
7807                          */
7808                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7809                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7810                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7811                 }
7812
7813                 target_vblank = last_flip_vblank + wait_for_vblank;
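                /*
                 * Example (illustrative): wait_for_vblank promotes to 0 or 1
                 * here, so with wait_for_vblank true the target becomes
                 * last_flip_vblank + 1 and the busy-wait below holds off until
                 * the vblank after the last completed flip - throttling to at
                 * most one flip per refresh cycle.
                 */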
7814
7815                 /*
7816                  * Wait until we're out of the vertical blank period before the one
7817                  * targeted by the flip
7818                  */
7819                 while ((acrtc_attach->enabled &&
7820                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7821                                                             0, &vpos, &hpos, NULL,
7822                                                             NULL, &pcrtc->hwmode)
7823                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7824                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7825                         (int)(target_vblank -
7826                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7827                         usleep_range(1000, 1100);
7828                 }
7829
7830                 /**
7831                  * Prepare the flip event for the pageflip interrupt to handle.
7832                  *
7833                  * This only works in the case where we've already turned on the
7834                  * appropriate hardware blocks (e.g. HUBP) so in the transition case
7835                  * from 0 -> n planes we have to skip a hardware generated event
7836                  * and rely on sending it from software.
7837                  */
7838                 if (acrtc_attach->base.state->event &&
7839                     acrtc_state->active_planes > 0) {
7840                         drm_crtc_vblank_get(pcrtc);
7841
7842                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7843
7844                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7845                         prepare_flip_isr(acrtc_attach);
7846
7847                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7848                 }
7849
7850                 if (acrtc_state->stream) {
7851                         if (acrtc_state->freesync_vrr_info_changed)
7852                                 bundle->stream_update.vrr_infopacket =
7853                                         &acrtc_state->stream->vrr_infopacket;
7854                 }
7855         }
7856
7857         /* Update the planes if changed or disable if we don't have any. */
7858         if ((planes_count || acrtc_state->active_planes == 0) &&
7859                 acrtc_state->stream) {
7860                 bundle->stream_update.stream = acrtc_state->stream;
7861                 if (new_pcrtc_state->mode_changed) {
7862                         bundle->stream_update.src = acrtc_state->stream->src;
7863                         bundle->stream_update.dst = acrtc_state->stream->dst;
7864                 }
7865
7866                 if (new_pcrtc_state->color_mgmt_changed) {
7867                         /*
7868                          * TODO: This isn't fully correct since we've actually
7869                          * already modified the stream in place.
7870                          */
7871                         bundle->stream_update.gamut_remap =
7872                                 &acrtc_state->stream->gamut_remap_matrix;
7873                         bundle->stream_update.output_csc_transform =
7874                                 &acrtc_state->stream->csc_color_matrix;
7875                         bundle->stream_update.out_transfer_func =
7876                                 acrtc_state->stream->out_transfer_func;
7877                 }
7878
7879                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7880                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7881                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
7882
7883                 /*
7884                  * If FreeSync state on the stream has changed then we need to
7885                  * re-adjust the min/max bounds now that DC doesn't handle this
7886                  * as part of commit.
7887                  */
7888                 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7889                     amdgpu_dm_vrr_active(acrtc_state)) {
7890                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7891                         dc_stream_adjust_vmin_vmax(
7892                                 dm->dc, acrtc_state->stream,
7893                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7894                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7895                 }
7896                 mutex_lock(&dm->dc_lock);
7897                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7898                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
7899                         amdgpu_dm_psr_disable(acrtc_state->stream);
7900
7901                 dc_commit_updates_for_stream(dm->dc,
7902                                                      bundle->surface_updates,
7903                                                      planes_count,
7904                                                      acrtc_state->stream,
7905                                                      &bundle->stream_update);
7906
7907                 /**
7908                  * Enable or disable the interrupts on the backend.
7909                  *
7910                  * Most pipes are put into power gating when unused.
7911                  *
7912                  * When power gating is enabled on a pipe we lose the
7913                  * interrupt enablement state when power gating is disabled.
7914                  *
7915                  * So we need to update the IRQ control state in hardware
7916                  * whenever the pipe turns on (since it could be previously
7917                  * power gated) or off (since some pipes can't be power gated
7918                  * on some ASICs).
7919                  */
7920                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7921                         dm_update_pflip_irq_state(drm_to_adev(dev),
7922                                                   acrtc_attach);
7923
7924                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7925                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7926                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7927                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
7928                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7929                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7930                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7931                         amdgpu_dm_psr_enable(acrtc_state->stream);
7932                 }
7933
7934                 mutex_unlock(&dm->dc_lock);
7935         }
7936
7937         /*
7938          * Update cursor state *after* programming all the planes.
7939          * This avoids redundant programming in the case where we're going
7940          * to be disabling a single plane, since those pipes are being disabled anyway.
7941          */
7942         if (acrtc_state->active_planes)
7943                 amdgpu_dm_commit_cursors(state);
7944
7945 cleanup:
7946         kfree(bundle);
7947 }
7948
7949 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7950                                    struct drm_atomic_state *state)
7951 {
7952         struct amdgpu_device *adev = drm_to_adev(dev);
7953         struct amdgpu_dm_connector *aconnector;
7954         struct drm_connector *connector;
7955         struct drm_connector_state *old_con_state, *new_con_state;
7956         struct drm_crtc_state *new_crtc_state;
7957         struct dm_crtc_state *new_dm_crtc_state;
7958         const struct dc_stream_status *status;
7959         int i, inst;
7960
7961         /* Notify device removals. */
7962         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7963                 if (old_con_state->crtc != new_con_state->crtc) {
7964                         /* CRTC changes require notification. */
7965                         goto notify;
7966                 }
7967
7968                 if (!new_con_state->crtc)
7969                         continue;
7970
7971                 new_crtc_state = drm_atomic_get_new_crtc_state(
7972                         state, new_con_state->crtc);
7973
7974                 if (!new_crtc_state)
7975                         continue;
7976
7977                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7978                         continue;
7979
7980         notify:
7981                 aconnector = to_amdgpu_dm_connector(connector);
7982
7983                 mutex_lock(&adev->dm.audio_lock);
7984                 inst = aconnector->audio_inst;
7985                 aconnector->audio_inst = -1;
7986                 mutex_unlock(&adev->dm.audio_lock);
7987
7988                 amdgpu_dm_audio_eld_notify(adev, inst);
7989         }
7990
7991         /* Notify audio device additions. */
7992         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7993                 if (!new_con_state->crtc)
7994                         continue;
7995
7996                 new_crtc_state = drm_atomic_get_new_crtc_state(
7997                         state, new_con_state->crtc);
7998
7999                 if (!new_crtc_state)
8000                         continue;
8001
8002                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8003                         continue;
8004
8005                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8006                 if (!new_dm_crtc_state->stream)
8007                         continue;
8008
8009                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8010                 if (!status)
8011                         continue;
8012
8013                 aconnector = to_amdgpu_dm_connector(connector);
8014
8015                 mutex_lock(&adev->dm.audio_lock);
8016                 inst = status->audio_inst;
8017                 aconnector->audio_inst = inst;
8018                 mutex_unlock(&adev->dm.audio_lock);
8019
8020                 amdgpu_dm_audio_eld_notify(adev, inst);
8021         }
8022 }
8023
8024 /*
8025  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8026  * @crtc_state: the DRM CRTC state
8027  * @stream_state: the DC stream state.
8028  *
8029  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8030  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8031  */
8032 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8033                                                 struct dc_stream_state *stream_state)
8034 {
8035         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8036 }
8037
8038 /**
8039  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8040  * @state: The atomic state to commit
8041  *
8042  * This will tell DC to commit the constructed DC state from atomic_check,
8043  * programming the hardware. Any failure here implies a hardware failure, since
8044  * atomic check should have filtered anything non-kosher.
8045  */
8046 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8047 {
8048         struct drm_device *dev = state->dev;
8049         struct amdgpu_device *adev = drm_to_adev(dev);
8050         struct amdgpu_display_manager *dm = &adev->dm;
8051         struct dm_atomic_state *dm_state;
8052         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8053         uint32_t i, j;
8054         struct drm_crtc *crtc;
8055         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8056         unsigned long flags;
8057         bool wait_for_vblank = true;
8058         struct drm_connector *connector;
8059         struct drm_connector_state *old_con_state, *new_con_state;
8060         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8061         int crtc_disable_count = 0;
8062         bool mode_set_reset_required = false;
8063
8064         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8065
8066         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8067
8068         dm_state = dm_atomic_get_new_state(state);
8069         if (dm_state && dm_state->context) {
8070                 dc_state = dm_state->context;
8071         } else {
8072                 /* No state changes, retain current state. */
8073                 dc_state_temp = dc_create_state(dm->dc);
8074                 ASSERT(dc_state_temp);
8075                 dc_state = dc_state_temp;
8076                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8077         }
8078
8079         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8080                                        new_crtc_state, i) {
8081                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8082
8083                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8084
8085                 if (old_crtc_state->active &&
8086                     (!new_crtc_state->active ||
8087                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8088                         manage_dm_interrupts(adev, acrtc, false);
8089                         dc_stream_release(dm_old_crtc_state->stream);
8090                 }
8091         }
8092
8093         drm_atomic_helper_calc_timestamping_constants(state);
8094
8095         /* update changed items */
8096         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8097                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8098
8099                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8100                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8101
8102                 DRM_DEBUG_DRIVER(
8103                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8104                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8105                         "connectors_changed:%d\n",
8106                         acrtc->crtc_id,
8107                         new_crtc_state->enable,
8108                         new_crtc_state->active,
8109                         new_crtc_state->planes_changed,
8110                         new_crtc_state->mode_changed,
8111                         new_crtc_state->active_changed,
8112                         new_crtc_state->connectors_changed);
8113
8114                 /* Disable cursor if disabling crtc */
8115                 if (old_crtc_state->active && !new_crtc_state->active) {
8116                         struct dc_cursor_position position;
8117
8118                         memset(&position, 0, sizeof(position));
8119                         mutex_lock(&dm->dc_lock);
8120                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8121                         mutex_unlock(&dm->dc_lock);
8122                 }
8123
8124                 /* Copy all transient state flags into dc state */
8125                 if (dm_new_crtc_state->stream) {
8126                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8127                                                             dm_new_crtc_state->stream);
8128                 }
8129
8130                 /* handles headless hotplug case, updating new_state and
8131                  * aconnector as needed
8132                  */
8133
8134                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8135
8136                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8137
8138                         if (!dm_new_crtc_state->stream) {
8139                                 /*
8140                                  * This could happen because of issues with
8141                                  * userspace notification delivery: userspace
8142                                  * tries to set a mode on a display that has
8143                                  * in fact been disconnected, so dc_sink is
8144                                  * NULL on the aconnector. We expect a mode
8145                                  * reset to come soon in that case.
8146                                  *
8147                                  * This can also happen when an unplug occurs
8148                                  * while the resume sequence is completing.
8149                                  *
8150                                  * In both cases, pretend we still have a sink
8151                                  * to keep the pipe running, so that the hw
8152                                  * state stays consistent with the sw state.
8153                                  */
8154                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8155                                                 __func__, acrtc->base.base.id);
8156                                 continue;
8157                         }
8158
8159                         if (dm_old_crtc_state->stream)
8160                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8161
8162                         pm_runtime_get_noresume(dev->dev);
8163
8164                         acrtc->enabled = true;
8165                         acrtc->hw_mode = new_crtc_state->mode;
8166                         crtc->hwmode = new_crtc_state->mode;
8167                         mode_set_reset_required = true;
8168                 } else if (modereset_required(new_crtc_state)) {
8169                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8170                         /* i.e. reset mode */
8171                         if (dm_old_crtc_state->stream)
8172                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8173                         mode_set_reset_required = true;
8174                 }
8175         } /* for_each_crtc_in_state() */
8176
8177         if (dc_state) {
8178                 /* if there was a mode set or reset, disable eDP PSR */
8179                 if (mode_set_reset_required)
8180                         amdgpu_dm_psr_disable_all(dm);
8181
8182                 dm_enable_per_frame_crtc_master_sync(dc_state);
8183                 mutex_lock(&dm->dc_lock);
8184                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8185                 mutex_unlock(&dm->dc_lock);
8186         }
8187
8188         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8189                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8190
8191                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8192
8193                 if (dm_new_crtc_state->stream != NULL) {
8194                         const struct dc_stream_status *status =
8195                                         dc_stream_get_status(dm_new_crtc_state->stream);
8196
8197                         if (!status)
8198                                 status = dc_stream_get_status_from_state(dc_state,
8199                                                                          dm_new_crtc_state->stream);
8200                         if (!status)
8201                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8202                         else
8203                                 acrtc->otg_inst = status->primary_otg_inst;
8204                 }
8205         }
8206 #ifdef CONFIG_DRM_AMD_DC_HDCP
8207         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8208                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8209                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8210                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8211
8212                 new_crtc_state = NULL;
8213
8214                 if (acrtc)
8215                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8216
8217                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8218
8219                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8220                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8221                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8222                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8223                         dm_new_con_state->update_hdcp = true;
8224                         continue;
8225                 }
8226
8227                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8228                         hdcp_update_display(
8229                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8230                                 new_con_state->hdcp_content_type,
8231                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8233         }
8234 #endif
8235
8236         /* Handle connector state changes */
8237         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8238                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8239                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8240                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8241                 struct dc_surface_update surface_updates[MAX_SURFACES];
8242                 struct dc_stream_update stream_update;
8243                 struct dc_info_packet hdr_packet;
8244                 struct dc_stream_status *status = NULL;
8245                 bool abm_changed, hdr_changed, scaling_changed;
8246
8247                 memset(&surface_updates, 0, sizeof(surface_updates));
8248                 memset(&stream_update, 0, sizeof(stream_update));
8249
8250                 if (acrtc) {
8251                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8252                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8253                 }
8254
8255                 /* Skip any modesets/resets */
8256                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8257                         continue;
8258
8259                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8260                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8261
8262                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8263                                                              dm_old_con_state);
8264
8265                 abm_changed = dm_new_crtc_state->abm_level !=
8266                               dm_old_crtc_state->abm_level;
8267
8268                 hdr_changed =
8269                         is_hdr_metadata_different(old_con_state, new_con_state);
8270
8271                 if (!scaling_changed && !abm_changed && !hdr_changed)
8272                         continue;
8273
8274                 stream_update.stream = dm_new_crtc_state->stream;
8275                 if (scaling_changed) {
8276                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8277                                         dm_new_con_state, dm_new_crtc_state->stream);
8278
8279                         stream_update.src = dm_new_crtc_state->stream->src;
8280                         stream_update.dst = dm_new_crtc_state->stream->dst;
8281                 }
8282
8283                 if (abm_changed) {
8284                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8285
8286                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8287                 }
8288
8289                 if (hdr_changed) {
8290                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8291                         stream_update.hdr_static_metadata = &hdr_packet;
8292                 }
8293
8294                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8295                 if (WARN_ON(!status) || WARN_ON(!status->plane_count))
8296                         continue;
8297
8298                 /*
8299                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8300                  * Here we create an empty update on each plane.
8301                  * To fix this, DC should permit updating only stream properties.
8302                  */
8303                 for (j = 0; j < status->plane_count; j++)
8304                         surface_updates[j].surface = status->plane_states[j];
8305
8306                 mutex_lock(&dm->dc_lock);
8307                 dc_commit_updates_for_stream(dm->dc,
8308                                              surface_updates,
8309                                              status->plane_count,
8310                                              dm_new_crtc_state->stream,
8311                                              &stream_update);
8312                 mutex_unlock(&dm->dc_lock);
8314         }
8315
8316         /* Count number of newly disabled CRTCs for dropping PM refs later. */
8317         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8318                                       new_crtc_state, i) {
8319                 if (old_crtc_state->active && !new_crtc_state->active)
8320                         crtc_disable_count++;
8321
8322                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8323                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8324
8325                 /* For freesync config update on crtc state and params for irq */
8326                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8327
8328                 /* Handle vrr on->off / off->on transitions */
8329                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8330                                                 dm_new_crtc_state);
8331         }
8332
8333         /*
8334          * Enable interrupts for CRTCs that are newly enabled or went through
8335          * a modeset. This is intentionally deferred until after the front end
8336          * state has been modified, so that the OTG is on by the time the IRQ
8337          * handlers run and they do not access stale or invalid state.
8338          */
8339         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8340                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8341
8342                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8343
8344                 if (new_crtc_state->active &&
8345                     (!old_crtc_state->active ||
8346                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8347                         dc_stream_retain(dm_new_crtc_state->stream);
8348                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8349                         manage_dm_interrupts(adev, acrtc, true);
8350
8351 #ifdef CONFIG_DEBUG_FS
8352                         /*
8353                          * The frontend may have changed, so reapply the CRC capture
8354                          * settings for the stream.
8355                          */
8356                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8357
8358                         if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8359                                 amdgpu_dm_crtc_configure_crc_source(
8360                                         crtc, dm_new_crtc_state,
8361                                         dm_new_crtc_state->crc_src);
8362                         }
8363 #endif
8364                 }
8365         }
8366
8367         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8368                 if (new_crtc_state->async_flip)
8369                         wait_for_vblank = false;
8370
8371         /* update planes when needed per crtc*/
8372         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8373                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8374
8375                 if (dm_new_crtc_state->stream)
8376                         amdgpu_dm_commit_planes(state, dc_state, dev,
8377                                                 dm, crtc, wait_for_vblank);
8378         }
8379
8380         /* Update audio instances for each connector. */
8381         amdgpu_dm_commit_audio(dev, state);
8382
8383         /*
8384          * Send a vblank event for any CRTC event not handled in the flip
8385          * path, and mark it consumed for drm_atomic_helper_commit_hw_done().
8386          */
8387         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8388         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8389
8390                 if (new_crtc_state->event)
8391                         drm_send_event_locked(dev, &new_crtc_state->event->base);
8392
8393                 new_crtc_state->event = NULL;
8394         }
8395         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8396
8397         /* Signal HW programming completion */
8398         drm_atomic_helper_commit_hw_done(state);
8399
8400         if (wait_for_vblank)
8401                 drm_atomic_helper_wait_for_flip_done(dev, state);
8402
8403         drm_atomic_helper_cleanup_planes(dev, state);
8404
8405         /* return the stolen vga memory back to VRAM */
8406         if (!adev->mman.keep_stolen_vga_memory)
8407                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8408         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8409
8410         /*
8411          * Finally, drop a runtime PM reference for each newly disabled CRTC,
8412          * so we can put the GPU into runtime suspend if we're not driving any
8413          * displays anymore
8414          */
8415         for (i = 0; i < crtc_disable_count; i++)
8416                 pm_runtime_put_autosuspend(dev->dev);
8417         pm_runtime_mark_last_busy(dev->dev);
8418
8419         if (dc_state_temp)
8420                 dc_release_state(dc_state_temp);
8421 }
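
/*
 * For reference, a commit_tail built purely from the generic DRM helpers -
 * essentially what drm_atomic_helper_commit_tail() does - follows the same
 * ordering contract the implementation above must preserve: program the
 * hardware first, signal hw_done, then wait for flips and clean up.
 * Illustrative sketch, not amdgpu code:
 *
 *	static void example_commit_tail(struct drm_atomic_state *state)
 *	{
 *		struct drm_device *dev = state->dev;
 *
 *		drm_atomic_helper_commit_modeset_disables(dev, state);
 *		drm_atomic_helper_commit_planes(dev, state, 0);
 *		drm_atomic_helper_commit_modeset_enables(dev, state);
 *
 *		drm_atomic_helper_commit_hw_done(state);
 *		drm_atomic_helper_wait_for_vblanks(dev, state);
 *		drm_atomic_helper_cleanup_planes(dev, state);
 *	}
 */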
8422
8423
8424 static int dm_force_atomic_commit(struct drm_connector *connector)
8425 {
8426         int ret = 0;
8427         struct drm_device *ddev = connector->dev;
8428         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8429         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8430         struct drm_plane *plane = disconnected_acrtc->base.primary;
8431         struct drm_connector_state *conn_state;
8432         struct drm_crtc_state *crtc_state;
8433         struct drm_plane_state *plane_state;
8434
8435         if (!state)
8436                 return -ENOMEM;
8437
8438         state->acquire_ctx = ddev->mode_config.acquire_ctx;
8439
8440         /* Construct an atomic state to restore the previous display setting */
8441
8442         /* Attach the connector to the drm_atomic_state */
8445         conn_state = drm_atomic_get_connector_state(state, connector);
8446
8447         ret = PTR_ERR_OR_ZERO(conn_state);
8448         if (ret)
8449                 goto out;
8450
8451         /* Attach the CRTC to the drm_atomic_state */
8452         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8453
8454         ret = PTR_ERR_OR_ZERO(crtc_state);
8455         if (ret)
8456                 goto out;
8457
8458         /* force a restore */
8459         crtc_state->mode_changed = true;
8460
8461         /* Attach plane to drm_atomic_state */
8462         plane_state = drm_atomic_get_plane_state(state, plane);
8463
8464         ret = PTR_ERR_OR_ZERO(plane_state);
8465         if (ret)
8466                 goto out;
8467
8468         /* Call commit internally with the state we just constructed */
8469         ret = drm_atomic_commit(state);
8470
8471 out:
8472         drm_atomic_state_put(state);
8473         if (ret)
8474                 DRM_ERROR("Restoring old state failed with %i\n", ret);
8475
8476         return ret;
8477 }
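
/*
 * The function above follows the standard atomic-state lifecycle, worth
 * spelling out because drm_atomic_state is reference counted. Illustrative
 * sketch with a hypothetical ctx, not driver code:
 *
 *	state = drm_atomic_state_alloc(dev);
 *	if (!state)
 *		return -ENOMEM;
 *	state->acquire_ctx = ctx;	// locking context held by the caller
 *
 *	crtc_state = drm_atomic_get_crtc_state(state, crtc);
 *	if (IS_ERR(crtc_state)) {
 *		ret = PTR_ERR(crtc_state);
 *		goto out;
 *	}
 *	crtc_state->mode_changed = true;	// edit the duplicated state
 *
 *	ret = drm_atomic_commit(state);		// check, then commit
 * out:
 *	drm_atomic_state_put(state);	// drop the ref, success or failure
 */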
8478
8479 /*
8480  * This function handles all cases when a set mode does not come upon hotplug.
8481  * This includes when a display is unplugged and then plugged back into the
8482  * same port, and when running without userspace desktop manager support.
8483  */
8484 void dm_restore_drm_connector_state(struct drm_device *dev,
8485                                     struct drm_connector *connector)
8486 {
8487         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8488         struct amdgpu_crtc *disconnected_acrtc;
8489         struct dm_crtc_state *acrtc_state;
8490
8491         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8492                 return;
8493
8494         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8495         if (!disconnected_acrtc)
8496                 return;
8497
8498         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8499         if (!acrtc_state->stream)
8500                 return;
8501
8502         /*
8503          * If the previous sink is not released and is different from the current
8504          * one, we deduce that we cannot rely on a usermode call to turn on the
8505          * display, so we do it here.
8506          */
8507         if (acrtc_state->stream->sink != aconnector->dc_sink)
8508                 dm_force_atomic_commit(&aconnector->base);
8509 }
8510
8511 /*
8512  * Grabs all modesetting locks to serialize against any blocking commits,
8513  * and waits for completion of all non-blocking commits.
8514  */
8515 static int do_aquire_global_lock(struct drm_device *dev,
8516                                  struct drm_atomic_state *state)
8517 {
8518         struct drm_crtc *crtc;
8519         struct drm_crtc_commit *commit;
8520         long ret;
8521
8522         /*
8523          * Adding all modeset locks to acquire_ctx ensures that
8524          * when the framework releases it, the extra locks we
8525          * are taking here will get released too.
8526          */
8527         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8528         if (ret)
8529                 return ret;
8530
8531         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8532                 spin_lock(&crtc->commit_lock);
8533                 commit = list_first_entry_or_null(&crtc->commit_list,
8534                                 struct drm_crtc_commit, commit_entry);
8535                 if (commit)
8536                         drm_crtc_commit_get(commit);
8537                 spin_unlock(&crtc->commit_lock);
8538
8539                 if (!commit)
8540                         continue;
8541
8542                 /*
8543                  * Make sure all pending HW programming has completed and
8544                  * all page flips are done.
8545                  */
8546                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8547
8548                 if (ret > 0)
8549                         ret = wait_for_completion_interruptible_timeout(
8550                                         &commit->flip_done, 10*HZ);
8551
8552                 if (ret == 0)
8553                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
8554                                   crtc->base.id, crtc->name);
8555
8556                 drm_crtc_commit_put(commit);
8557         }
8558
8559         return ret < 0 ? ret : 0;
8560 }
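
/*
 * Note on the return convention above:
 * wait_for_completion_interruptible_timeout() returns 0 on timeout,
 * -ERESTARTSYS when interrupted by a signal, and the remaining jiffies
 * (> 0) on success - hence the "ret == 0" timeout message and the final
 * "ret < 0 ? ret : 0" translation back to an errno-style result.
 */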
8561
8562 static void get_freesync_config_for_crtc(
8563         struct dm_crtc_state *new_crtc_state,
8564         struct dm_connector_state *new_con_state)
8565 {
8566         struct mod_freesync_config config = {0};
8567         struct amdgpu_dm_connector *aconnector =
8568                         to_amdgpu_dm_connector(new_con_state->base.connector);
8569         struct drm_display_mode *mode = &new_crtc_state->base.mode;
8570         int vrefresh = drm_mode_vrefresh(mode);
8571
8572         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8573                                         vrefresh >= aconnector->min_vfreq &&
8574                                         vrefresh <= aconnector->max_vfreq;
8575
8576         if (new_crtc_state->vrr_supported) {
8577                 new_crtc_state->stream->ignore_msa_timing_param = true;
8578                 config.state = new_crtc_state->base.vrr_enabled ?
8579                                 VRR_STATE_ACTIVE_VARIABLE :
8580                                 VRR_STATE_INACTIVE;
8581                 config.min_refresh_in_uhz =
8582                                 aconnector->min_vfreq * 1000000;
8583                 config.max_refresh_in_uhz =
8584                                 aconnector->max_vfreq * 1000000;
8585                 config.vsif_supported = true;
8586                 config.btr = true;
8587         }
8588
8589         new_crtc_state->freesync_config = config;
8590 }
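
/*
 * Worked example for the conversion above (illustrative numbers): a panel
 * reporting min_vfreq = 48 and max_vfreq = 144 yields
 * config.min_refresh_in_uhz = 48 * 1000000 = 48000000 and
 * config.max_refresh_in_uhz = 144000000, i.e. refresh rates are handed to
 * the freesync module in micro-Hz. A 60 Hz mode on that panel satisfies
 * 48 <= 60 <= 144, so vrr_supported is true.
 */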
8591
8592 static void reset_freesync_config_for_crtc(
8593         struct dm_crtc_state *new_crtc_state)
8594 {
8595         new_crtc_state->vrr_supported = false;
8596
8597         memset(&new_crtc_state->vrr_infopacket, 0,
8598                sizeof(new_crtc_state->vrr_infopacket));
8599 }
8600
8601 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8602                                 struct drm_atomic_state *state,
8603                                 struct drm_crtc *crtc,
8604                                 struct drm_crtc_state *old_crtc_state,
8605                                 struct drm_crtc_state *new_crtc_state,
8606                                 bool enable,
8607                                 bool *lock_and_validation_needed)
8608 {
8609         struct dm_atomic_state *dm_state = NULL;
8610         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8611         struct dc_stream_state *new_stream;
8612         int ret = 0;
8613
8614         /*
8615          * TODO: Move this code into dm_crtc_atomic_check once we get rid of
8616          * dc_validation_set and update the changed items there.
8617          */
8618         struct amdgpu_crtc *acrtc = NULL;
8619         struct amdgpu_dm_connector *aconnector = NULL;
8620         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8621         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8622
8623         new_stream = NULL;
8624
8625         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8626         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8627         acrtc = to_amdgpu_crtc(crtc);
8628         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8629
8630         /* TODO This hack should go away */
8631         if (aconnector && enable) {
8632                 /* Make sure fake sink is created in plug-in scenario */
8633                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8634                                                             &aconnector->base);
8635                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8636                                                             &aconnector->base);
8637
8638                 if (IS_ERR(drm_new_conn_state)) {
8639                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8640                         goto fail;
8641                 }
8642
8643                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8644                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8645
8646                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8647                         goto skip_modeset;
8648
8649                 new_stream = create_validate_stream_for_sink(aconnector,
8650                                                              &new_crtc_state->mode,
8651                                                              dm_new_conn_state,
8652                                                              dm_old_crtc_state->stream);
8653
8654                 /*
8655                  * We can have no stream on ACTION_SET if a display
8656                  * was disconnected during S3. In this case it is not an
8657                  * error: the OS will be updated after detection and
8658                  * will do the right thing on the next atomic commit.
8659                  */
8660
8661                 if (!new_stream) {
8662                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8663                                         __func__, acrtc->base.base.id);
8664                         ret = -ENOMEM;
8665                         goto fail;
8666                 }
8667
8668                 /*
8669                  * TODO: Check VSDB bits to decide whether this should
8670                  * be enabled or not.
8671                  */
8672                 new_stream->triggered_crtc_reset.enabled =
8673                         dm->force_timing_sync;
8674
8675                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8676
8677                 ret = fill_hdr_info_packet(drm_new_conn_state,
8678                                            &new_stream->hdr_static_metadata);
8679                 if (ret)
8680                         goto fail;
8681
8682                 /*
8683                  * If we already removed the old stream from the context
8684                  * (and set the new stream to NULL) then we can't reuse
8685                  * the old stream even if the stream and scaling are unchanged.
8686                  * Otherwise we would hit the BUG_ON below and get a black screen.
8687                  *
8688                  * TODO: Refactor this function to allow this check to work
8689                  * in all conditions.
8690                  */
8691                 if (dm_new_crtc_state->stream &&
8692                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8693                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8694                         new_crtc_state->mode_changed = false;
8695                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
8696                                          new_crtc_state->mode_changed);
8697                 }
8698         }
8699
8700         /* mode_changed flag may get updated above, need to check again */
8701         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8702                 goto skip_modeset;
8703
8704         DRM_DEBUG_DRIVER(
8705                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8706                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8707                 "connectors_changed:%d\n",
8708                 acrtc->crtc_id,
8709                 new_crtc_state->enable,
8710                 new_crtc_state->active,
8711                 new_crtc_state->planes_changed,
8712                 new_crtc_state->mode_changed,
8713                 new_crtc_state->active_changed,
8714                 new_crtc_state->connectors_changed);
8715
8716         /* Remove stream for any changed/disabled CRTC */
8717         if (!enable) {
8718
8719                 if (!dm_old_crtc_state->stream)
8720                         goto skip_modeset;
8721
8722                 ret = dm_atomic_get_state(state, &dm_state);
8723                 if (ret)
8724                         goto fail;
8725
8726                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8727                                 crtc->base.id);
8728
8729                 /* i.e. reset mode */
8730                 if (dc_remove_stream_from_ctx(
8731                                 dm->dc,
8732                                 dm_state->context,
8733                                 dm_old_crtc_state->stream) != DC_OK) {
8734                         ret = -EINVAL;
8735                         goto fail;
8736                 }
8737
8738                 dc_stream_release(dm_old_crtc_state->stream);
8739                 dm_new_crtc_state->stream = NULL;
8740
8741                 reset_freesync_config_for_crtc(dm_new_crtc_state);
8742
8743                 *lock_and_validation_needed = true;
8744
8745         } else {/* Add stream for any updated/enabled CRTC */
8746                 /*
8747                  * Quick fix to prevent a NULL pointer on new_stream when newly
8748                  * added MST connectors are not found in the existing crtc_state
8749                  * in chained mode. TODO: dig out the root cause of that.
8750                  */
8751                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8752                         goto skip_modeset;
8753
8754                 if (modereset_required(new_crtc_state))
8755                         goto skip_modeset;
8756
8757                 if (modeset_required(new_crtc_state, new_stream,
8758                                      dm_old_crtc_state->stream)) {
8759
8760                         WARN_ON(dm_new_crtc_state->stream);
8761
8762                         ret = dm_atomic_get_state(state, &dm_state);
8763                         if (ret)
8764                                 goto fail;
8765
8766                         dm_new_crtc_state->stream = new_stream;
8767
8768                         dc_stream_retain(new_stream);
8769
8770                         DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8771                                                 crtc->base.id);
8772
8773                         if (dc_add_stream_to_ctx(
8774                                         dm->dc,
8775                                         dm_state->context,
8776                                         dm_new_crtc_state->stream) != DC_OK) {
8777                                 ret = -EINVAL;
8778                                 goto fail;
8779                         }
8780
8781                         *lock_and_validation_needed = true;
8782                 }
8783         }
8784
8785 skip_modeset:
8786         /* Release extra reference */
8787         if (new_stream)
8788                 dc_stream_release(new_stream);
8789
8790         /*
8791          * We want to do dc stream updates that do not require a
8792          * full modeset below.
8793          */
8794         if (!(enable && aconnector && new_crtc_state->active))
8795                 return 0;
8796         /*
8797          * Given the above conditions, the dc stream cannot be NULL because:
8798          * 1. we're in the process of enabling the CRTC (it has just been
8799          *    added to the dc context, or is already in the context),
8800          * 2. it has a valid connector attached, and
8801          * 3. it is currently active and enabled.
8802          * => The dc stream state currently exists.
8803          */
8804         BUG_ON(dm_new_crtc_state->stream == NULL);
8805
8806         /* Scaling or underscan settings */
8807         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8808                 update_stream_scaling_settings(
8809                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8810
8811         /* ABM settings */
8812         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8813
8814         /*
8815          * Color management settings. We also update color properties
8816          * when a modeset is needed, to ensure it gets reprogrammed.
8817          */
8818         if (dm_new_crtc_state->base.color_mgmt_changed ||
8819             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8820                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8821                 if (ret)
8822                         goto fail;
8823         }
8824
8825         /* Update Freesync settings. */
8826         get_freesync_config_for_crtc(dm_new_crtc_state,
8827                                      dm_new_conn_state);
8828
8829         return ret;
8830
8831 fail:
8832         if (new_stream)
8833                 dc_stream_release(new_stream);
8834         return ret;
8835 }
8836
8837 static bool should_reset_plane(struct drm_atomic_state *state,
8838                                struct drm_plane *plane,
8839                                struct drm_plane_state *old_plane_state,
8840                                struct drm_plane_state *new_plane_state)
8841 {
8842         struct drm_plane *other;
8843         struct drm_plane_state *old_other_state, *new_other_state;
8844         struct drm_crtc_state *new_crtc_state;
8845         int i;
8846
8847         /*
8848          * TODO: Remove this hack once the checks below are sufficient to
8849          * determine when we need to reset all the planes on the stream.
8851          */
8852         if (state->allow_modeset)
8853                 return true;
8854
8855         /* Exit early if we know that we're adding or removing the plane. */
8856         if (old_plane_state->crtc != new_plane_state->crtc)
8857                 return true;
8858
8859         /* old crtc == new_crtc == NULL, plane not in context. */
8860         if (!new_plane_state->crtc)
8861                 return false;
8862
8863         new_crtc_state =
8864                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8865
8866         if (!new_crtc_state)
8867                 return true;
8868
8869         /* CRTC Degamma changes currently require us to recreate planes. */
8870         if (new_crtc_state->color_mgmt_changed)
8871                 return true;
8872
8873         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8874                 return true;
8875
8876         /*
8877          * If there are any new primary or overlay planes being added or
8878          * removed then the z-order can potentially change. To ensure
8879          * correct z-order and pipe acquisition the current DC architecture
8880          * requires us to remove and recreate all existing planes.
8881          *
8882          * TODO: Come up with a more elegant solution for this.
8883          */
8884         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8885                 struct amdgpu_framebuffer *old_afb, *new_afb;
8886                 if (other->type == DRM_PLANE_TYPE_CURSOR)
8887                         continue;
8888
8889                 if (old_other_state->crtc != new_plane_state->crtc &&
8890                     new_other_state->crtc != new_plane_state->crtc)
8891                         continue;
8892
8893                 if (old_other_state->crtc != new_other_state->crtc)
8894                         return true;
8895
8896                 /* Src/dst size and scaling updates. */
8897                 if (old_other_state->src_w != new_other_state->src_w ||
8898                     old_other_state->src_h != new_other_state->src_h ||
8899                     old_other_state->crtc_w != new_other_state->crtc_w ||
8900                     old_other_state->crtc_h != new_other_state->crtc_h)
8901                         return true;
8902
8903                 /* Rotation / mirroring updates. */
8904                 if (old_other_state->rotation != new_other_state->rotation)
8905                         return true;
8906
8907                 /* Blending updates. */
8908                 if (old_other_state->pixel_blend_mode !=
8909                     new_other_state->pixel_blend_mode)
8910                         return true;
8911
8912                 /* Alpha updates. */
8913                 if (old_other_state->alpha != new_other_state->alpha)
8914                         return true;
8915
8916                 /* Colorspace changes. */
8917                 if (old_other_state->color_range != new_other_state->color_range ||
8918                     old_other_state->color_encoding != new_other_state->color_encoding)
8919                         return true;
8920
8921                 /* Framebuffer checks fall at the end. */
8922                 if (!old_other_state->fb || !new_other_state->fb)
8923                         continue;
8924
8925                 /* Pixel format changes can require bandwidth updates. */
8926                 if (old_other_state->fb->format != new_other_state->fb->format)
8927                         return true;
8928
8929                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8930                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8931
8932                 /* Tiling and DCC changes also require bandwidth updates. */
8933                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
8934                     old_afb->base.modifier != new_afb->base.modifier)
8935                         return true;
8936         }
8937
8938         return false;
8939 }
8940
8941 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
8942                               struct drm_plane_state *new_plane_state,
8943                               struct drm_framebuffer *fb)
8944 {
8945         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
8946         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
8947         unsigned int pitch;
8948         bool linear;
8949
8950         if (fb->width > new_acrtc->max_cursor_width ||
8951             fb->height > new_acrtc->max_cursor_height) {
8952                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
8953                                  new_plane_state->fb->width,
8954                                  new_plane_state->fb->height);
8955                 return -EINVAL;
8956         }
8957         if (new_plane_state->src_w != fb->width << 16 ||
8958             new_plane_state->src_h != fb->height << 16) {
8959                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
8960                 return -EINVAL;
8961         }
8962
8963         /* Pitch in pixels */
8964         pitch = fb->pitches[0] / fb->format->cpp[0];
8965
8966         if (fb->width != pitch) {
8967                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
8968                                  fb->width, pitch);
8969                 return -EINVAL;
8970         }
8971
8972         switch (pitch) {
8973         case 64:
8974         case 128:
8975         case 256:
8976                 /* FB pitch is supported by cursor plane */
8977                 break;
8978         default:
8979                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
8980                 return -EINVAL;
8981         }
8982
8983         /* Core DRM takes care of checking FB modifiers, so we only need to
8984          * check tiling flags when the FB doesn't have a modifier. */
8985         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
8986                 if (adev->family < AMDGPU_FAMILY_AI) {
8987                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
8988                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
8989                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
8990                 } else {
8991                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
8992                 }
8993                 if (!linear) {
8994                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
8995                         return -EINVAL;
8996                 }
8997         }
8998
8999         return 0;
9000 }
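
/*
 * Worked example for the checks above (illustrative numbers): a 64x64
 * ARGB8888 cursor has fb->pitches[0] = 64 * 4 = 256 bytes and
 * fb->format->cpp[0] = 4, so pitch = 256 / 4 = 64 pixels. That equals
 * fb->width and is in the supported {64, 128, 256} set, so the FB passes.
 * A 100x100 cursor (pitch 100 px) would be rejected by the pitch switch
 * even though it is below typical max_cursor_width/height limits.
 */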
9001
9002 static int dm_update_plane_state(struct dc *dc,
9003                                  struct drm_atomic_state *state,
9004                                  struct drm_plane *plane,
9005                                  struct drm_plane_state *old_plane_state,
9006                                  struct drm_plane_state *new_plane_state,
9007                                  bool enable,
9008                                  bool *lock_and_validation_needed)
9009 {
9010
9011         struct dm_atomic_state *dm_state = NULL;
9012         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9013         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9014         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9015         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9016         struct amdgpu_crtc *new_acrtc;
9017         bool needs_reset;
9018         int ret = 0;
9019
9020
9021         new_plane_crtc = new_plane_state->crtc;
9022         old_plane_crtc = old_plane_state->crtc;
9023         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9024         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9025
9026         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9027                 if (!enable || !new_plane_crtc ||
9028                     drm_atomic_plane_disabling(plane->state, new_plane_state))
9029                         return 0;
9030
9031                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9032
9033                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9034                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9035                         return -EINVAL;
9036                 }
9037
9038                 if (new_plane_state->fb) {
9039                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9040                                                  new_plane_state->fb);
9041                         if (ret)
9042                                 return ret;
9043                 }
9044
9045                 return 0;
9046         }
9047
9048         needs_reset = should_reset_plane(state, plane, old_plane_state,
9049                                          new_plane_state);
9050
9051         /* Remove any changed/removed planes */
9052         if (!enable) {
9053                 if (!needs_reset)
9054                         return 0;
9055
9056                 if (!old_plane_crtc)
9057                         return 0;
9058
9059                 old_crtc_state = drm_atomic_get_old_crtc_state(
9060                                 state, old_plane_crtc);
9061                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9062
9063                 if (!dm_old_crtc_state->stream)
9064                         return 0;
9065
9066                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9067                                 plane->base.id, old_plane_crtc->base.id);
9068
9069                 ret = dm_atomic_get_state(state, &dm_state);
9070                 if (ret)
9071                         return ret;
9072
9073                 if (!dc_remove_plane_from_context(
9074                                 dc,
9075                                 dm_old_crtc_state->stream,
9076                                 dm_old_plane_state->dc_state,
9077                                 dm_state->context)) {
9078
9079                         return -EINVAL;
9080                 }
9081
9082
9083                 dc_plane_state_release(dm_old_plane_state->dc_state);
9084                 dm_new_plane_state->dc_state = NULL;
9085
9086                 *lock_and_validation_needed = true;
9087
9088         } else { /* Add new planes */
9089                 struct dc_plane_state *dc_new_plane_state;
9090
9091                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9092                         return 0;
9093
9094                 if (!new_plane_crtc)
9095                         return 0;
9096
9097                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9098                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9099
9100                 if (!dm_new_crtc_state->stream)
9101                         return 0;
9102
9103                 if (!needs_reset)
9104                         return 0;
9105
9106                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9107                 if (ret)
9108                         return ret;
9109
9110                 WARN_ON(dm_new_plane_state->dc_state);
9111
9112                 dc_new_plane_state = dc_create_plane_state(dc);
9113                 if (!dc_new_plane_state)
9114                         return -ENOMEM;
9115
9116                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9117                                 plane->base.id, new_plane_crtc->base.id);
9118
9119                 ret = fill_dc_plane_attributes(
9120                         drm_to_adev(new_plane_crtc->dev),
9121                         dc_new_plane_state,
9122                         new_plane_state,
9123                         new_crtc_state);
9124                 if (ret) {
9125                         dc_plane_state_release(dc_new_plane_state);
9126                         return ret;
9127                 }
9128
9129                 ret = dm_atomic_get_state(state, &dm_state);
9130                 if (ret) {
9131                         dc_plane_state_release(dc_new_plane_state);
9132                         return ret;
9133                 }
9134
9135                 /*
9136                  * Any atomic check errors that occur after this will
9137                  * not need a release. The plane state will be attached
9138                  * to the stream, and therefore part of the atomic
9139                  * state. It'll be released when the atomic state is
9140                  * cleaned.
9141                  */
9142                 if (!dc_add_plane_to_context(
9143                                 dc,
9144                                 dm_new_crtc_state->stream,
9145                                 dc_new_plane_state,
9146                                 dm_state->context)) {
9147
9148                         dc_plane_state_release(dc_new_plane_state);
9149                         return -EINVAL;
9150                 }
9151
9152                 dm_new_plane_state->dc_state = dc_new_plane_state;
9153
9154                 /* Tell DC to do a full surface update every time there
9155                  * is a plane change. Inefficient, but works for now.
9156                  */
9157                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9158
9159                 *lock_and_validation_needed = true;
9160         }
9161
9162
9163         return ret;
9164 }
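
/*
 * Illustrative sketch of how atomic_check drives the two helpers above:
 * planes and streams are first removed from the dc context with
 * enable == false and then (re)added with enable == true, so DC sees all
 * removals before any additions. Error handling omitted; this mirrors
 * the structure of amdgpu_dm_atomic_check() rather than quoting it:
 *
 *	for_each_oldnew_plane_in_state_reverse(state, plane, old_ps, new_ps, i)
 *		dm_update_plane_state(dc, state, plane, old_ps, new_ps,
 *				      false, &lock_and_validation_needed);
 *	for_each_oldnew_crtc_in_state(state, crtc, old_cs, new_cs, i)
 *		dm_update_crtc_state(dm, state, crtc, old_cs, new_cs,
 *				     false, &lock_and_validation_needed);
 *
 *	for_each_oldnew_crtc_in_state(state, crtc, old_cs, new_cs, i)
 *		dm_update_crtc_state(dm, state, crtc, old_cs, new_cs,
 *				     true, &lock_and_validation_needed);
 *	for_each_oldnew_plane_in_state_reverse(state, plane, old_ps, new_ps, i)
 *		dm_update_plane_state(dc, state, plane, old_ps, new_ps,
 *				      true, &lock_and_validation_needed);
 */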
9165
9166 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9167                                 struct drm_crtc *crtc,
9168                                 struct drm_crtc_state *new_crtc_state)
9169 {
9170         struct drm_plane_state *new_cursor_state, *new_primary_state;
9171         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9172
9173         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9174          * cursor per pipe, but it inherits the scaling and positioning from the
9175          * underlying pipe. Check that the cursor plane's scaling matches the
9176          * primary plane's. */
9177
9178         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9179         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9180         if (!new_cursor_state || !new_primary_state ||
9181             !new_cursor_state->fb || !new_primary_state->fb)
9182                 return 0;
9183
9184         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9185                          (new_cursor_state->src_w >> 16);
9186         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9187                          (new_cursor_state->src_h >> 16);
9188
9189         primary_scale_w = new_primary_state->crtc_w * 1000 /
9190                          (new_primary_state->src_w >> 16);
9191         primary_scale_h = new_primary_state->crtc_h * 1000 /
9192                          (new_primary_state->src_h >> 16);
9193
9194         if (cursor_scale_w != primary_scale_w ||
9195             cursor_scale_h != primary_scale_h) {
9196                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9197                 return -EINVAL;
9198         }
9199
9200         return 0;
9201 }
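
/*
 * Worked example for the scale comparison above (illustrative numbers):
 * src_w/src_h are 16.16 fixed point, so a 64x64 cursor FB scanned out at
 * 64x64 gives cursor_scale_w = 64 * 1000 / 64 = 1000 (1.00x). If the
 * primary plane upscales a 1920-wide source to 3840 pixels,
 * primary_scale_w = 2000 (2.00x); the cursor would be implicitly magnified
 * with the pipe, so the mismatch is rejected with -EINVAL.
 */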
9202
9203 #if defined(CONFIG_DRM_AMD_DC_DCN)
9204 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9205 {
9206         struct drm_connector *connector;
9207         struct drm_connector_state *conn_state;
9208         struct amdgpu_dm_connector *aconnector = NULL;
9209         int i;
9210         for_each_new_connector_in_state(state, connector, conn_state, i) {
9211                 if (conn_state->crtc != crtc)
9212                         continue;
9213
9214                 aconnector = to_amdgpu_dm_connector(connector);
9215                 if (!aconnector->port || !aconnector->mst_port)
9216                         aconnector = NULL;
9217                 else
9218                         break;
9219         }
9220
9221         if (!aconnector)
9222                 return 0;
9223
9224         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9225 }
9226 #endif
9227
9228 /**
9229  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9230  * @dev: The DRM device
9231  * @state: The atomic state to commit
9232  *
9233  * Validate that the given atomic state is programmable by DC into hardware.
9234  * This involves constructing a &struct dc_state reflecting the new hardware
9235  * state we wish to commit, then querying DC to see if it is programmable. It's
9236  * important not to modify the existing DC state. Otherwise, atomic_check
9237  * may unexpectedly commit hardware changes.
9238  *
9239  * When validating the DC state, it's important that the right locks are
9240  * acquired. For full updates case which removes/adds/updates streams on one
9241  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9242  * that any such full update commit will wait for completion of any outstanding
9243  * flip using DRMs synchronization events.
9244  *
9245  * Note that DM adds the affected connectors for all CRTCs in state, when that
9246  * might not seem necessary. This is because DC stream creation requires the
9247  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9248  * be possible but non-trivial - a possible TODO item.
9249  *
9250  * Return: 0 on success, or a negative error code if validation failed.
9251  */
9252 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9253                                   struct drm_atomic_state *state)
9254 {
9255         struct amdgpu_device *adev = drm_to_adev(dev);
9256         struct dm_atomic_state *dm_state = NULL;
9257         struct dc *dc = adev->dm.dc;
9258         struct drm_connector *connector;
9259         struct drm_connector_state *old_con_state, *new_con_state;
9260         struct drm_crtc *crtc;
9261         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9262         struct drm_plane *plane;
9263         struct drm_plane_state *old_plane_state, *new_plane_state;
9264         enum dc_status status;
9265         int ret, i;
9266         bool lock_and_validation_needed = false;
9267         struct dm_crtc_state *dm_old_crtc_state;
9268
9269         trace_amdgpu_dm_atomic_check_begin(state);
9270
9271         ret = drm_atomic_helper_check_modeset(dev, state);
9272         if (ret)
9273                 goto fail;
9274
9275         /* Check connector changes */
9276         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9277                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9278                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9279
9280                 /* Skip connectors that are disabled or part of modeset already. */
9281                 if (!old_con_state->crtc && !new_con_state->crtc)
9282                         continue;
9283
9284                 if (!new_con_state->crtc)
9285                         continue;
9286
9287                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9288                 if (IS_ERR(new_crtc_state)) {
9289                         ret = PTR_ERR(new_crtc_state);
9290                         goto fail;
9291                 }
9292
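                     /*
                      * A changed ABM (Adaptive Backlight Management) level
                      * flags the CRTC as having changed connectors so that the
                      * commit path reprograms the new level.
                      */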
9293                 if (dm_old_con_state->abm_level !=
9294                     dm_new_con_state->abm_level)
9295                         new_crtc_state->connectors_changed = true;
9296         }
9297
9298 #if defined(CONFIG_DRM_AMD_DC_DCN)
9299         if (adev->asic_type >= CHIP_NAVI10) {
9300                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9301                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9302                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
9303                                 if (ret)
9304                                         goto fail;
9305                         }
9306                 }
9307         }
9308 #endif
9309         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9310                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9311
9312                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9313                     !new_crtc_state->color_mgmt_changed &&
9314                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9315                     !dm_old_crtc_state->dsc_force_changed)
9316                         continue;
9317
9318                 if (!new_crtc_state->enable)
9319                         continue;
9320
9321                 ret = drm_atomic_add_affected_connectors(state, crtc);
9322                 if (ret)
9323                         goto fail;
9324
9325                 ret = drm_atomic_add_affected_planes(state, crtc);
9326                 if (ret)
9327                         goto fail;
9328
9329                 if (dm_old_crtc_state->dsc_force_changed)
9330                         new_crtc_state->mode_changed = true;
9331         }
9332
9333         /*
9334          * Add all primary and overlay planes on the CRTC to the state
9335          * whenever a plane is enabled to maintain correct z-ordering
9336          * and to enable fast surface updates.
9337          */
9338         drm_for_each_crtc(crtc, dev) {
9339                 bool modified = false;
9340
9341                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9342                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9343                                 continue;
9344
9345                         if (new_plane_state->crtc == crtc ||
9346                             old_plane_state->crtc == crtc) {
9347                                 modified = true;
9348                                 break;
9349                         }
9350                 }
9351
9352                 if (!modified)
9353                         continue;
9354
9355                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9356                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9357                                 continue;
9358
9359                         new_plane_state =
9360                                 drm_atomic_get_plane_state(state, plane);
9361
9362                         if (IS_ERR(new_plane_state)) {
9363                                 ret = PTR_ERR(new_plane_state);
9364                                 goto fail;
9365                         }
9366                 }
9367         }
9368
9369         /* Remove existing planes if they are modified */
9370         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9371                 ret = dm_update_plane_state(dc, state, plane,
9372                                             old_plane_state,
9373                                             new_plane_state,
9374                                             false,
9375                                             &lock_and_validation_needed);
9376                 if (ret)
9377                         goto fail;
9378         }
9379
9380         /* Disable all crtcs which require disable */
9381         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9382                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9383                                            old_crtc_state,
9384                                            new_crtc_state,
9385                                            false,
9386                                            &lock_and_validation_needed);
9387                 if (ret)
9388                         goto fail;
9389         }
9390
9391         /* Enable all crtcs which require enable */
9392         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9393                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9394                                            old_crtc_state,
9395                                            new_crtc_state,
9396                                            true,
9397                                            &lock_and_validation_needed);
9398                 if (ret)
9399                         goto fail;
9400         }
9401
9402         /* Add new/modified planes */
9403         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9404                 ret = dm_update_plane_state(dc, state, plane,
9405                                             old_plane_state,
9406                                             new_plane_state,
9407                                             true,
9408                                             &lock_and_validation_needed);
9409                 if (ret)
9410                         goto fail;
9411         }
9412
9413         /* Run this here since we want to validate the streams we created */
9414         ret = drm_atomic_helper_check_planes(dev, state);
9415         if (ret)
9416                 goto fail;
9417
9418         /* Check cursor planes scaling */
9419         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9420                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9421                 if (ret)
9422                         goto fail;
9423         }
9424
9425         if (state->legacy_cursor_update) {
9426                 /*
9427                  * This is a fast cursor update coming from the plane update
9428                  * helper, check if it can be done asynchronously for better
9429                  * performance.
9430                  */
9431                 state->async_update =
9432                         !drm_atomic_helper_async_check(dev, state);
9433
9434                 /*
9435                  * Skip the remaining global validation if this is an async
9436                  * update. Cursor updates can be done without affecting
9437                  * state or bandwidth calcs and this avoids the performance
9438                  * penalty of locking the private state object and
9439                  * allocating a new dc_state.
9440                  */
9441                 if (state->async_update)
9442                         return 0;
9443         }
9444
9445         /* Check scaling and underscan changes */
9446         /* TODO: Scaling-changes validation was removed due to the inability to
9447          * commit a new stream into the context w/o causing a full reset. Need
9448          * to decide how to handle this.
9449          */
9450         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9451                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9452                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9453                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9454
9455                 /* Skip any modesets/resets */
9456                 if (!acrtc || drm_atomic_crtc_needs_modeset(
9457                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9458                         continue;
9459
9460                 /* Skip anything that is not a scaling or underscan change */
9461                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9462                         continue;
9463
9464                 lock_and_validation_needed = true;
9465         }
9466
9467         /*
9468          * Streams and planes are reset when there are changes that affect
9469          * bandwidth. Anything that affects bandwidth needs to go through
9470          * DC global validation to ensure that the configuration can be applied
9471          * to hardware.
9472          *
9473          * We have to currently stall out here in atomic_check for outstanding
9474          * commits to finish in this case because our IRQ handlers reference
9475          * DRM state directly - we can end up disabling interrupts too early
9476          * if we don't.
9477          *
9478          * TODO: Remove this stall and drop DM state private objects.
9479          */
9480         if (lock_and_validation_needed) {
9481                 ret = dm_atomic_get_state(state, &dm_state);
9482                 if (ret)
9483                         goto fail;
9484
9485                 ret = do_aquire_global_lock(dev, state);
9486                 if (ret)
9487                         goto fail;
9488
9489 #if defined(CONFIG_DRM_AMD_DC_DCN)
9490                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
9491                         ret = -EINVAL;
9492                         goto fail;
                     }

9493                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9494                 if (ret)
9495                         goto fail;
9496 #endif
9497
9498                 /*
9499                  * Perform validation of MST topology in the state:
9500                  * We need to perform MST atomic check before calling
9501                  * dc_validate_global_state(); otherwise there is a chance
9502                  * of getting stuck in an infinite loop and eventually hanging.
9503                  */
9504                 ret = drm_dp_mst_atomic_check(state);
9505                 if (ret)
9506                         goto fail;
9507                 status = dc_validate_global_state(dc, dm_state->context, false);
9508                 if (status != DC_OK) {
9509                         DC_LOG_WARNING("DC global validation failure: %s (%d)\n",
9510                                        dc_status_to_str(status), status);
9511                         ret = -EINVAL;
9512                         goto fail;
9513                 }
9514         } else {
9515                 /*
9516                  * The commit is a fast update. Fast updates shouldn't change
9517                  * the DC context, affect global validation, and can have their
9518                  * commit work done in parallel with other commits not touching
9519                  * the same resource. If we have a new DC context as part of
9520                  * the DM atomic state from validation we need to free it and
9521                  * retain the existing one instead.
9522                  *
9523                  * Furthermore, since the DM atomic state only contains the DC
9524                  * context and can safely be annulled, we can free the state
9525                  * and clear the associated private object now to free
9526                  * some memory and avoid a possible use-after-free later.
9527                  */
9528
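                     /*
                      * Illustrative example of the removal below: with
                      * private_objs = [A, DM, B] and num_private_objs = 3,
                      * destroying the DM object at i == 1 moves B into slot 1
                      * and truncates the array to [A, B] with
                      * num_private_objs = 2 (a swap-with-last removal).
                      */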
9529                 for (i = 0; i < state->num_private_objs; i++) {
9530                         struct drm_private_obj *obj = state->private_objs[i].ptr;
9531
9532                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
9533                                 int j = state->num_private_objs - 1;
9534
9535                                 dm_atomic_destroy_state(obj,
9536                                                 state->private_objs[i].state);
9537
9538                                 /* If i is not at the end of the array then the
9539                                  * last element needs to be moved to where i was
9540                                  * before the array can safely be truncated.
9541                                  */
9542                                 if (i != j)
9543                                         state->private_objs[i] =
9544                                                 state->private_objs[j];
9545
9546                                 state->private_objs[j].ptr = NULL;
9547                                 state->private_objs[j].state = NULL;
9548                                 state->private_objs[j].old_state = NULL;
9549                                 state->private_objs[j].new_state = NULL;
9550
9551                                 state->num_private_objs = j;
9552                                 break;
9553                         }
9554                 }
9555         }
9556
9557         /* Store the overall update type for use later in the atomic commit. */
9558         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9559                 struct dm_crtc_state *dm_new_crtc_state =
9560                         to_dm_crtc_state(new_crtc_state);
9561
9562                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9563                                                          UPDATE_TYPE_FULL :
9564                                                          UPDATE_TYPE_FAST;
9565         }
9566
9567         /* ret must be 0 (success) at this point */
9568         WARN_ON(ret);
9569
9570         trace_amdgpu_dm_atomic_check_finish(state, ret);
9571
9572         return ret;
9573
9574 fail:
9575         if (ret == -EDEADLK)
9576                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9577         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9578                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9579         else
9580                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
9581
9582         trace_amdgpu_dm_atomic_check_finish(state, ret);
9583
9584         return ret;
9585 }
9586
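     /*
      * A sink that reports DP_MSA_TIMING_PAR_IGNORED in the
      * DP_DOWN_STREAM_PORT_COUNT DPCD register can ignore the MSA timing
      * parameters and follow a varying vertical total, which is the
      * prerequisite for FreeSync over DP/eDP.
      */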
9587 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9588                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
9589 {
9590         uint8_t dpcd_data;
9591         bool capable = false;
9592
9593         if (amdgpu_dm_connector->dc_link &&
9594                 dm_helpers_dp_read_dpcd(
9595                                 NULL,
9596                                 amdgpu_dm_connector->dc_link,
9597                                 DP_DOWN_STREAM_PORT_COUNT,
9598                                 &dpcd_data,
9599                                 sizeof(dpcd_data))) {
9600                 capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
9601         }
9602
9603         return capable;
9604 }
9605 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9606                                         struct edid *edid)
9607 {
9608         int i;
9609         bool edid_check_required;
9610         struct detailed_timing *timing;
9611         struct detailed_non_pixel *data;
9612         struct detailed_data_monitor_range *range;
9613         struct amdgpu_dm_connector *amdgpu_dm_connector =
9614                         to_amdgpu_dm_connector(connector);
9615         struct dm_connector_state *dm_con_state = NULL;
9616
9617         struct drm_device *dev = connector->dev;
9618         struct amdgpu_device *adev = drm_to_adev(dev);
9619         bool freesync_capable = false;
9620
9621         if (!connector->state) {
9622                 DRM_ERROR("%s - Connector has no state\n", __func__);
9623                 goto update;
9624         }
9625
9626         if (!edid) {
9627                 dm_con_state = to_dm_connector_state(connector->state);
9628
9629                 amdgpu_dm_connector->min_vfreq = 0;
9630                 amdgpu_dm_connector->max_vfreq = 0;
9631                 amdgpu_dm_connector->pixel_clock_mhz = 0;
9632
9633                 goto update;
9634         }
9635
9636         dm_con_state = to_dm_connector_state(connector->state);
9637
9638         edid_check_required = false;
9639         if (!amdgpu_dm_connector->dc_sink) {
9640                 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9641                 goto update;
9642         }
9643         if (!adev->dm.freesync_module)
9644                 goto update;
9645         /*
9646          * If the EDID is non-NULL, restrict FreeSync only to DP and eDP sinks.
9647          */
9648         if (edid) {
9649                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9650                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9651                         edid_check_required = is_dp_capable_without_timing_msa(
9652                                                 adev->dm.dc,
9653                                                 amdgpu_dm_connector);
9654                 }
9655         }
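             /*
              * Scan the four EDID detailed timing descriptors for a monitor
              * range limits descriptor, which advertises the vertical refresh
              * range needed for FreeSync.
              */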
9656         if (edid_check_required && (edid->version > 1 ||
9657            (edid->version == 1 && edid->revision > 1))) {
9658                 for (i = 0; i < 4; i++) {
9660                         timing  = &edid->detailed_timings[i];
9661                         data    = &timing->data.other_data;
9662                         range   = &data->data.range;
9663                         /*
9664                          * Check if monitor has continuous frequency mode
9665                          */
9666                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
9667                                 continue;
9668                         /*
9669                          * Accept only the "range limits only" flag. If
9670                          * flags == 1, no additional timing information is
9671                          * provided; default GTF, GTF secondary curve and
9672                          * CVT are not supported.
9673                          */
9674                         if (range->flags != 1)
9675                                 continue;
9676
9677                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9678                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9679                         amdgpu_dm_connector->pixel_clock_mhz =
9680                                 range->pixel_clock_mhz * 10;
9681
9682                         connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
9683                         connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
9684
9685                         break;
9686                 }
9687
9688                 if (amdgpu_dm_connector->max_vfreq -
9689                     amdgpu_dm_connector->min_vfreq > 10)
9691                         freesync_capable = true;
9693         }
9694
9695 update:
9696         if (dm_con_state)
9697                 dm_con_state->freesync_capable = freesync_capable;
9698
9699         if (connector->vrr_capable_property)
9700                 drm_connector_set_vrr_capable_property(connector,
9701                                                        freesync_capable);
9702 }
9703
9704 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9705 {
9706         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9707
9708         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9709                 return;
9710         if (link->type == dc_connection_none)
9711                 return;
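             /* The first byte read at DP_PSR_SUPPORT (DPCD 0x070) is the eDP
              * PSR version supported by the sink; zero means PSR is not
              * supported.
              */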
9712         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9713                                         dpcd_data, sizeof(dpcd_data))) {
9714                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9715
9716                 if (dpcd_data[0] == 0) {
9717                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9718                         link->psr_settings.psr_feature_enabled = false;
9719                 } else {
9720                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
9721                         link->psr_settings.psr_feature_enabled = true;
9722                 }
9723
9724                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9725         }
9726 }
9727
9728 /*
9729  * amdgpu_dm_link_setup_psr() - configure the PSR link
9730  * @stream: stream state
9731  *
9732  * Return: true on success
9733  */
9734 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9735 {
9736         struct dc_link *link = NULL;
9737         struct psr_config psr_config = {0};
9738         struct psr_context psr_context = {0};
9739         bool ret = false;
9740
9741         if (!stream)
9742                 return false;
9743
9744         link = stream->link;
9745
9746         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9747
9748         if (psr_config.psr_version > 0) {
9749                 psr_config.psr_exit_link_training_required = 0x1;
9750                 psr_config.psr_frame_capture_indication_req = 0;
9751                 psr_config.psr_rfb_setup_time = 0x37;
9752                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9753                 psr_config.allow_smu_optimizations = 0x0;
9754
9755                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9757         }
9758         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9759
9760         return ret;
9761 }
9762
9763 /*
9764  * amdgpu_dm_psr_enable() - enable the PSR firmware
9765  * @stream: stream state
9766  *
9767  * Return: true on success
9768  */
9769 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9770 {
9771         struct dc_link *link = stream->link;
9772         unsigned int vsync_rate_hz = 0;
9773         struct dc_static_screen_params params = {0};
9774         /* Number of static frames before generating an interrupt to enter
9775          * PSR. Initialized to a fail-safe default of 2 static frames.
9776          */
9778         unsigned int num_frames_static = 2;
9779
9780         DRM_DEBUG_DRIVER("Enabling psr...\n");
9781
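             /* Refresh rate in Hz = pixel clock / (h_total * v_total);
              * pix_clk_100hz is in units of 100 Hz, hence the extra * 100.
              */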
9782         vsync_rate_hz = div64_u64(div64_u64((
9783                         stream->timing.pix_clk_100hz * 100),
9784                         stream->timing.v_total),
9785                         stream->timing.h_total);
9786
9787         /* Round up: calculate the number of frames such that at least
9788          * 30 ms have passed.
9790          */
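             /* For example, at a 60 Hz vsync rate frame_time_microsec is
              * 1000000 / 60 = 16666, giving
              * num_frames_static = 30000 / 16666 + 1 = 2 frames.
              */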
9791         if (vsync_rate_hz != 0) {
9792                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9793                 num_frames_static = (30000 / frame_time_microsec) + 1;
9794         }
9795
9796         params.triggers.cursor_update = true;
9797         params.triggers.overlay_update = true;
9798         params.triggers.surface_update = true;
9799         params.num_frames = num_frames_static;
9800
9801         dc_stream_set_static_screen_params(link->ctx->dc,
9802                                            &stream, 1,
9803                                            &params);
9804
9805         return dc_link_set_psr_allow_active(link, true, false, false);
9806 }
9807
9808 /*
9809  * amdgpu_dm_psr_disable() - disable the PSR firmware
9810  * @stream: stream state
9811  *
9812  * Return: true on success
9813  */
9814 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9815 {
9817         DRM_DEBUG_DRIVER("Disabling psr...\n");
9818
9819         return dc_link_set_psr_allow_active(stream->link, false, true, false);
9820 }
9821
9822 /*
9823  * amdgpu_dm_psr_disable_all() - disable the PSR firmware if PSR is
9824  * enabled on any stream
9825  *
9826  * Return: true on success
9827  */
9828 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9829 {
9830         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9831         return dc_set_psr_allow_active(dm->dc, false);
9832 }
9833
9834 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9835 {
9836         struct amdgpu_device *adev = drm_to_adev(dev);
9837         struct dc *dc = adev->dm.dc;
9838         int i;
9839
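             /* Propagate the force_timing_sync setting to every stream in the
              * current DC state and retrigger CRTC timing synchronization,
              * all under the DC lock.
              */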
9840         mutex_lock(&adev->dm.dc_lock);
9841         if (dc->current_state) {
9842                 for (i = 0; i < dc->current_state->stream_count; ++i)
9843                         dc->current_state->streams[i]
9844                                 ->triggered_crtc_reset.enabled =
9845                                 adev->dm.force_timing_sync;
9846
9847                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9848                 dc_trigger_sync(dc, dc->current_state);
9849         }
9850         mutex_unlock(&adev->dm.dc_lock);
9851 }
9852
9853 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9854                        uint32_t value, const char *func_name)
9855 {
9856 #ifdef DM_CHECK_ADDR_0
9857         if (address == 0) {
9858                 DC_ERR("invalid register write; address = 0\n");
9859                 return;
9860         }
9861 #endif
9862         cgs_write_register(ctx->cgs_device, address, value);
9863         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9864 }
9865
9866 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9867                           const char *func_name)
9868 {
9869         uint32_t value;
9870 #ifdef DM_CHECK_ADDR_0
9871         if (address == 0) {
9872                 DC_ERR("invalid register read; address = 0\n");
9873                 return 0;
9874         }
9875 #endif
9876
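             /* Register reads cannot be serviced while a DMUB register-write
              * gather is in progress (unless burst writes are allowed);
              * hitting this path indicates a driver bug.
              */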
9877         if (ctx->dmub_srv &&
9878             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9879             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9880                 ASSERT(false);
9881                 return 0;
9882         }
9883
9884         value = cgs_read_register(ctx->cgs_device, address);
9885
9886         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9887
9888         return value;
9889 }