/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
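/*
 * A rough sketch of the flow described above, in terms of functions
 * declared in this file (the exact call chain varies by code path and
 * kernel version):
 *
 *   userspace atomic ioctl
 *     -> DRM atomic helpers: validation
 *        -> amdgpu_dm_atomic_check()        (DRM state -> DC validation)
 *     -> commit
 *        -> amdgpu_dm_atomic_commit_tail()  (programs DC streams/planes)
 *           -> dc_*() calls into the display core
 *
 * Interrupts flow the other way: DC interrupt sources are translated back
 * into DRM events (vblank, page-flip completion) by the dm_*_high_irq()
 * handlers further down in this file.
 */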

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
                                   struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success.
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get the current vertical blank counter
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to query
 *
 * Return: the vblank counter for the given CRTC, or 0 on error.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        struct amdgpu_crtc *acrtc;

        if (crtc >= adev->mode_info.num_crtc)
                return 0;

        acrtc = adev->mode_info.crtcs[crtc];

        if (acrtc->dm_irq_params.stream == NULL) {
                DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
                return 0;
        }

        return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;
        struct amdgpu_crtc *acrtc;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;

        acrtc = adev->mode_info.crtcs[crtc];

        if (acrtc->dm_irq_params.stream == NULL) {
                DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
                return 0;
        }

        /*
         * TODO rework base driver to use values directly.
         * for now parse it back into reg-format
         */
        dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                 &v_blank_start,
                                 &v_blank_end,
                                 &h_position,
                                 &v_position);

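        /*
         * Pack the values the way the legacy register layout did: the low
         * 16 bits carry the vertical component and the high 16 bits the
         * horizontal one (see the TODO above about reworking the base
         * driver to consume the raw values instead).
         */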
        *position = v_position | (h_position << 16);
        *vbl = v_blank_start | (v_blank_end << 16);

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        return new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED ||
               amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state);
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: the common IRQ parameters for the pageflip source
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                             amdgpu_crtc->pflip_status,
                             AMDGPU_FLIP_SUBMITTED,
                             amdgpu_crtc->crtc_id,
                             amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                     amdgpu_crtc->crtc_id, amdgpu_crtc,
                     vrr_active, (int)!e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                frame_duration_ns,
                                                ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping only gives valid results
                 * once scanout is past the front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * The following must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif

/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by draining pending DMUB notifications
 * and trace-buffer entries.
 */
#define DMUB_TRACE_MAX_READ 64
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
        struct dmub_notification notify;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        uint32_t count = 0;

        if (dc_enable_dmub_notifications(adev->dm.dc)) {
                if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
                        do {
                                dc_stat_get_dmub_notification(adev->dm.dc, &notify);
                        } while (notify.pending_notification);

                        if (adev->dm.dmub_notify)
                                memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
                        if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
                                complete(&adev->dm.dmub_aux_transfer_done);
                        /* TODO: HPD implementation */

                } else {
                        DRM_ERROR("DM: Failed to receive correct outbox IRQ!\n");
                }
        }

        do {
                if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
                        trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                         entry.param0, entry.param1);

                        DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                         entry.trace_code, entry.tick_count, entry.param0, entry.param1);
                } else {
                        break;
                }

                count++;

        } while (count <= DMUB_TRACE_MAX_READ);

        ASSERT(count <= DMUB_TRACE_MAX_READ);
}
#endif

static int dm_set_clockgating_state(void *handle,
                                    enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                                    enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

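        /*
         * The allocation below reserves max_size * 4 bytes, i.e. enough
         * for the largest listed mode at 4 bytes per pixel (presumably
         * a 32bpp scanout format).
         */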
        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r) {
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                } else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

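/*
 * ELD ("EDID-Like Data") callback for the HDA audio component: copies the
 * ELD of the connector matching the requested port into @buf. Note that
 * the full ELD size is returned even when the copy was truncated to
 * max_bytes.
 */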
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

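/*
 * Bring up the DMUB (display microcontroller, firmware "DMCUB") hardware
 * service. In rough order, the function below:
 *   1. bails out cleanly on ASICs without DMUB support,
 *   2. copies the firmware's inst/const and bss/data sections plus the
 *      VBIOS image into the reserved framebuffer windows,
 *   3. zeroes the mailbox, trace-buffer and firmware-state windows,
 *   4. calls dmub_srv_hw_init() and waits for the firmware auto-load,
 *   5. creates the DC-side DMUB server handle.
 */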
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                       fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

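        /*
         * Note on the shifts below: the system aperture bounds are
         * programmed in 256KB units (hence the >> 18), the AGP aperture
         * in 16MB units (>> 24), and the GART page table addresses in
         * 4KB pages (>> 12).
         */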
        logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that prevents it from using any vram
                 * that lies beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a
                 * workaround, raise the system aperture high address by 1
                 * to avoid the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
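/*
 * Helpers for gating DC's idle optimizations (MALL, AMD's "memory access
 * at last level" display cache) off vblank interrupt activity: each
 * enable/disable request is queued as a work item, and idle optimizations
 * are only allowed while no CRTC has its vblank interrupt enabled.
 */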
static void event_mall_stutter(struct work_struct *work)
{
        struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
        struct amdgpu_display_manager *dm = vblank_work->dm;

        mutex_lock(&dm->dc_lock);

        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
        else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;

        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

        mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
        int max_caps = dc->caps.max_links;
        struct vblank_workqueue *vblank_work;
        int i = 0;

        vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
        if (ZERO_OR_NULL_PTR(vblank_work)) {
                kfree(vblank_work);
                return NULL;
        }

        for (i = 0; i < max_caps; i++)
                INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

        return vblank_work;
}
#endif
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        int r;

        adev->dm.ddev = adev_to_drm(adev);
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
        spin_lock_init(&adev->dm.vblank_lock);
#endif

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        init_data.flags.disable_dmcu = true;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_VANGOGH:
                init_data.flags.gpu_vm_support = true;
                break;
#endif
        default:
                break;
        }

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        INIT_LIST_HEAD(&adev->dm.da_list);
        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
                adev->dm.dc->debug.force_single_disp_pipe_split = false;
                adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;

        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->apu_flags) {
                struct dc_phy_addr_space_config pa_config;

                mmhub_read_system_context(adev, &pa_config);

                /* Call the DC init_memory func */
                dc_setup_system_context(adev->dm.dc, &pa_config);
        }
#endif

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
        } else {
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                 adev->dm.freesync_module);
        }

        amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.dc->caps.max_links > 0) {
                adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

                if (!adev->dm.vblank_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
        }
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
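        /*
         * AUX-over-DMUB support: when the DC core can deliver DMUB
         * notifications, dm_dmub_outbox1_low_irq() (above) copies replies
         * into adev->dm.dmub_notify and completes dmub_aux_transfer_done,
         * both of which are set up here.
         */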
        if (dc_enable_dmub_notifications(adev->dm.dc)) {
                init_completion(&adev->dm.dmub_aux_transfer_done);
                adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
                if (!adev->dm.dmub_notify) {
                        DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify\n");
                        goto error;
                }
                amdgpu_dm_outbox_init(adev);
        }

        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
                DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->dm.display_indexes_num; i++)
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);

        amdgpu_dm_audio_fini(adev);

        amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        if (adev->dm.crc_rd_wrk) {
                flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
                kfree(adev->dm.crc_rd_wrk);
                adev->dm.crc_rd_wrk = NULL;
        }
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.vblank_workqueue) {
                adev->dm.vblank_workqueue->dm = NULL;
                kfree(adev->dm.vblank_workqueue);
                adev->dm.vblank_workqueue = NULL;
        }
#endif

        if (adev->dm.dc->ctx->dmub_srv) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
                adev->dm.dc->ctx->dmub_srv = NULL;
        }

        if (dc_enable_dmub_notifications(adev->dm.dc)) {
                kfree(adev->dm.dmub_notify);
                adev->dm.dmub_notify = NULL;
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
}

1370 static int load_dmcu_fw(struct amdgpu_device *adev)
1371 {
1372         const char *fw_name_dmcu = NULL;
1373         int r;
1374         const struct dmcu_firmware_header_v1_0 *hdr;
1375
1376         switch(adev->asic_type) {
1377 #if defined(CONFIG_DRM_AMD_DC_SI)
1378         case CHIP_TAHITI:
1379         case CHIP_PITCAIRN:
1380         case CHIP_VERDE:
1381         case CHIP_OLAND:
1382 #endif
1383         case CHIP_BONAIRE:
1384         case CHIP_HAWAII:
1385         case CHIP_KAVERI:
1386         case CHIP_KABINI:
1387         case CHIP_MULLINS:
1388         case CHIP_TONGA:
1389         case CHIP_FIJI:
1390         case CHIP_CARRIZO:
1391         case CHIP_STONEY:
1392         case CHIP_POLARIS11:
1393         case CHIP_POLARIS10:
1394         case CHIP_POLARIS12:
1395         case CHIP_VEGAM:
1396         case CHIP_VEGA10:
1397         case CHIP_VEGA12:
1398         case CHIP_VEGA20:
1399         case CHIP_NAVI10:
1400         case CHIP_NAVI14:
1401         case CHIP_RENOIR:
1402         case CHIP_SIENNA_CICHLID:
1403         case CHIP_NAVY_FLOUNDER:
1404         case CHIP_DIMGREY_CAVEFISH:
1405         case CHIP_BEIGE_GOBY:
1406         case CHIP_VANGOGH:
1407                 return 0;
1408         case CHIP_NAVI12:
1409                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1410                 break;
1411         case CHIP_RAVEN:
1412                 /* Picasso and Raven2 reuse the Raven DMCU firmware */
1413                 if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
1414                     ASICREV_IS_RAVEN2(adev->external_rev_id))
1415                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1416                 else
1417                         return 0;
1418                 break;
1419         default:
1420                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1421                 return -EINVAL;
1422         }
1423
1424         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1425                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1426                 return 0;
1427         }
1428
1429         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1430         if (r == -ENOENT) {
1431                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1432                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1433                 adev->dm.fw_dmcu = NULL;
1434                 return 0;
1435         }
1436         if (r) {
1437                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1438                         fw_name_dmcu);
1439                 return r;
1440         }
1441
1442         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1443         if (r) {
1444                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1445                         fw_name_dmcu);
1446                 release_firmware(adev->dm.fw_dmcu);
1447                 adev->dm.fw_dmcu = NULL;
1448                 return r;
1449         }
1450
1451         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1452         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1453         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1454         adev->firmware.fw_size +=
1455                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1456
1457         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1458         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1459         adev->firmware.fw_size +=
1460                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
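        /*
         * Illustrative example with hypothetical sizes: a 64 KiB ucode image
         * carrying a 4 KiB interrupt vector table reserves
         * ALIGN(60 KiB, PAGE_SIZE) for ERAM and ALIGN(4 KiB, PAGE_SIZE) for
         * INTV, so each region handed to PSP stays page-aligned.
         */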
1461
1462         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1463
1464         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1465
1466         return 0;
1467 }
1468
1469 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1470 {
1471         struct amdgpu_device *adev = ctx;
1472
1473         return dm_read_reg(adev->dm.dc->ctx, address);
1474 }
1475
1476 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1477                                      uint32_t value)
1478 {
1479         struct amdgpu_device *adev = ctx;
1480
1481         return dm_write_reg(adev->dm.dc->ctx, address, value);
1482 }
1483
1484 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1485 {
1486         struct dmub_srv_create_params create_params;
1487         struct dmub_srv_region_params region_params;
1488         struct dmub_srv_region_info region_info;
1489         struct dmub_srv_fb_params fb_params;
1490         struct dmub_srv_fb_info *fb_info;
1491         struct dmub_srv *dmub_srv;
1492         const struct dmcub_firmware_header_v1_0 *hdr;
1493         const char *fw_name_dmub;
1494         enum dmub_asic dmub_asic;
1495         enum dmub_status status;
1496         int r;
1497
1498         switch (adev->asic_type) {
1499         case CHIP_RENOIR:
1500                 dmub_asic = DMUB_ASIC_DCN21;
1501                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1502                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1503                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1504                 break;
1505         case CHIP_SIENNA_CICHLID:
1506                 dmub_asic = DMUB_ASIC_DCN30;
1507                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1508                 break;
1509         case CHIP_NAVY_FLOUNDER:
1510                 dmub_asic = DMUB_ASIC_DCN30;
1511                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1512                 break;
1513         case CHIP_VANGOGH:
1514                 dmub_asic = DMUB_ASIC_DCN301;
1515                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1516                 break;
1517         case CHIP_DIMGREY_CAVEFISH:
1518                 dmub_asic = DMUB_ASIC_DCN302;
1519                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1520                 break;
1521         case CHIP_BEIGE_GOBY:
1522                 dmub_asic = DMUB_ASIC_DCN303;
1523                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1524                 break;
1525
1526         default:
1527                 /* ASIC doesn't support DMUB. */
1528                 return 0;
1529         }
1530
1531         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1532         if (r) {
1533                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1534                 return 0; /* proceed without DMUB rather than fail init */
1535         }
1536
1537         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1538         if (r) {
1539                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1540                 return 0;
1541         }
1542
1543         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1544         /* Cache the firmware version up front so the PSP path below logs it. */
1545         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1546
1547         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1548                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1549                         AMDGPU_UCODE_ID_DMCUB;
1550                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1551                         adev->dm.dmub_fw;
1552                 adev->firmware.fw_size +=
1553                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1554
1555                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1556                          adev->dm.dmcub_fw_version);
1557         }
1558
1559         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1560         dmub_srv = adev->dm.dmub_srv;
1561
1562         if (!dmub_srv) {
1563                 DRM_ERROR("Failed to allocate DMUB service!\n");
1564                 return -ENOMEM;
1565         }
1566
1567         memset(&create_params, 0, sizeof(create_params));
1568         create_params.user_ctx = adev;
1569         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1570         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1571         create_params.asic = dmub_asic;
1572
1573         /* Create the DMUB service. */
1574         status = dmub_srv_create(dmub_srv, &create_params);
1575         if (status != DMUB_STATUS_OK) {
1576                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1577                 return -EINVAL;
1578         }
1579
1580         /* Calculate the size of all the regions for the DMUB service. */
1581         memset(&region_params, 0, sizeof(region_params));
1582
1583         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1584                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1585         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1586         region_params.vbios_size = adev->bios_size;
1587         region_params.fw_bss_data = region_params.bss_data_size ?
1588                 adev->dm.dmub_fw->data +
1589                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1590                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1591         region_params.fw_inst_const =
1592                 adev->dm.dmub_fw->data +
1593                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1594                 PSP_HEADER_BYTES;
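        /*
         * Sketch of the assumed firmware image layout, with offsets relative
         * to ucode_array_offset_bytes:
         *
         *   [ PSP header | inst const | PSP footer | bss/data ]
         *
         * fw_inst_const skips past the PSP header, while fw_bss_data begins
         * right after the full inst_const_bytes (header and footer included).
         */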
1595
1596         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1597                                            &region_info);
1598
1599         if (status != DMUB_STATUS_OK) {
1600                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1601                 return -EINVAL;
1602         }
1603
1604         /*
1605          * Allocate a framebuffer based on the total size of all the regions.
1606          * TODO: Move this into GART.
1607          */
1608         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1609                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1610                                     &adev->dm.dmub_bo_gpu_addr,
1611                                     &adev->dm.dmub_bo_cpu_addr);
1612         if (r)
1613                 return r;
1614
1615         /* Rebase the regions on the framebuffer address. */
1616         memset(&fb_params, 0, sizeof(fb_params));
1617         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1618         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1619         fb_params.region_info = &region_info;
1620
1621         adev->dm.dmub_fb_info =
1622                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1623         fb_info = adev->dm.dmub_fb_info;
1624
1625         if (!fb_info) {
1626                 DRM_ERROR(
1627                         "Failed to allocate framebuffer info for DMUB service!\n");
1628                 return -ENOMEM;
1629         }
1630
1631         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1632         if (status != DMUB_STATUS_OK) {
1633                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1634                 return -EINVAL;
1635         }
1636
1637         return 0;
1638 }
1639
1640 static int dm_sw_init(void *handle)
1641 {
1642         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1643         int r;
1644
1645         r = dm_dmub_sw_init(adev);
1646         if (r)
1647                 return r;
1648
1649         return load_dmcu_fw(adev);
1650 }
1651
1652 static int dm_sw_fini(void *handle)
1653 {
1654         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1655
1656         kfree(adev->dm.dmub_fb_info);
1657         adev->dm.dmub_fb_info = NULL;
1658
1659         if (adev->dm.dmub_srv) {
1660                 dmub_srv_destroy(adev->dm.dmub_srv);
1661                 adev->dm.dmub_srv = NULL;
1662         }
1663
1664         release_firmware(adev->dm.dmub_fw);
1665         adev->dm.dmub_fw = NULL;
1666
1667         release_firmware(adev->dm.fw_dmcu);
1668         adev->dm.fw_dmcu = NULL;
1669
1670         return 0;
1671 }
1672
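/*
 * Start MST topology management on every connector whose link detected an
 * MST branch device, so sideband messaging is running for downstream ports.
 */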
1673 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1674 {
1675         struct amdgpu_dm_connector *aconnector;
1676         struct drm_connector *connector;
1677         struct drm_connector_list_iter iter;
1678         int ret = 0;
1679
1680         drm_connector_list_iter_begin(dev, &iter);
1681         drm_for_each_connector_iter(connector, &iter) {
1682                 aconnector = to_amdgpu_dm_connector(connector);
1683                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1684                     aconnector->mst_mgr.aux) {
1685                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1686                                          aconnector,
1687                                          aconnector->base.base.id);
1688
1689                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1690                         if (ret < 0) {
1691                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1692                                 aconnector->dc_link->type =
1693                                         dc_connection_single;
1694                                 break;
1695                         }
1696                 }
1697         }
1698         drm_connector_list_iter_end(&iter);
1699
1700         return ret;
1701 }
1702
1703 static int dm_late_init(void *handle)
1704 {
1705         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1706
1707         struct dmcu_iram_parameters params;
1708         unsigned int linear_lut[16];
1709         int i;
1710         struct dmcu *dmcu = NULL;
1711         bool ret = true;
1712
1713         dmcu = adev->dm.dc->res_pool->dmcu;
1714
1715         for (i = 0; i < 16; i++)
1716                 linear_lut[i] = 0xFFFF * i / 15;
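        /* The ramp is a linear identity LUT: 0x0000, 0x1111, ..., 0xFFFF. */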
1717
1718         params.set = 0;
1719         params.backlight_ramping_start = 0xCCCC;
1720         params.backlight_ramping_reduction = 0xCCCCCCCC;
1721         params.backlight_lut_array_size = 16;
1722         params.backlight_lut_array = linear_lut;
1723
1724         /* Min backlight level after ABM reduction; don't allow below 1%:
1725          * 0xFFFF * 0.01 = 0x28F
1726          */
1727         params.min_abm_backlight = 0x28F;
1728
1729         /* When ABM is implemented in DMCUB firmware (ABM 2.4 and up),
1730          * the dmcu object will be NULL; in that case the iram config is
1731          * loaded through the DMUB service instead.
1732          */
1733         if (dmcu)
1734                 ret = dmcu_load_iram(dmcu, params);
1735         else if (adev->dm.dc->ctx->dmub_srv)
1736                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1737
1738         if (!ret)
1739                 return -EINVAL;
1740
1741         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1742 }
1743
1744 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1745 {
1746         struct amdgpu_dm_connector *aconnector;
1747         struct drm_connector *connector;
1748         struct drm_connector_list_iter iter;
1749         struct drm_dp_mst_topology_mgr *mgr;
1750         int ret;
1751         bool need_hotplug = false;
1752
1753         drm_connector_list_iter_begin(dev, &iter);
1754         drm_for_each_connector_iter(connector, &iter) {
1755                 aconnector = to_amdgpu_dm_connector(connector);
1756                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1757                     aconnector->mst_port)
1758                         continue;
1759
1760                 mgr = &aconnector->mst_mgr;
1761
1762                 if (suspend) {
1763                         drm_dp_mst_topology_mgr_suspend(mgr);
1764                 } else {
1765                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1766                         if (ret < 0) {
1767                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1768                                 need_hotplug = true;
1769                         }
1770                 }
1771         }
1772         drm_connector_list_iter_end(&iter);
1773
1774         if (need_hotplug)
1775                 drm_kms_helper_hotplug_event(dev);
1776 }
1777
1778 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1779 {
1780         struct smu_context *smu = &adev->smu;
1781         int ret = 0;
1782
1783         if (!is_support_sw_smu(adev))
1784                 return 0;
1785
1786         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1787          * depends on the Windows driver dc implementation.
1788          * For Navi1x, the clock settings of the dcn watermarks are fixed; the
1789          * settings must be passed to smu during boot up and on resume from s3.
1790          * boot up: dc calculates the dcn watermark clock settings within
1791          * dc_create, dcn20_resource_construct,
1792          * then calls the pplib functions below to pass the settings to smu:
1793          * smu_set_watermarks_for_clock_ranges
1794          * smu_set_watermarks_table
1795          * navi10_set_watermarks_table
1796          * smu_write_watermarks_table
1797          *
1798          * For Renoir, the clock settings of the dcn watermarks are also fixed
1799          * values. dc has implemented a different flow for the Windows driver:
1800          * dc_hardware_init / dc_set_power_state
1801          * dcn10_init_hw
1802          * notify_wm_ranges
1803          * set_wm_ranges
1804          * -- Linux
1805          * smu_set_watermarks_for_clock_ranges
1806          * renoir_set_watermarks_table
1807          * smu_write_watermarks_table
1808          *
1809          * For Linux,
1810          * dc_hardware_init -> amdgpu_dm_init
1811          * dc_set_power_state --> dm_resume
1812          *
1813          * Therefore, this function applies to navi10/12/14 but not to
1814          * Renoir.
1815          */
1816         switch (adev->asic_type) {
1817         case CHIP_NAVI10:
1818         case CHIP_NAVI14:
1819         case CHIP_NAVI12:
1820                 break;
1821         default:
1822                 return 0;
1823         }
1824
1825         ret = smu_write_watermarks_table(smu);
1826         if (ret) {
1827                 DRM_ERROR("Failed to update WMTABLE!\n");
1828                 return ret;
1829         }
1830
1831         return 0;
1832 }
1833
1834 /**
1835  * dm_hw_init() - Initialize DC device
1836  * @handle: The base driver device containing the amdgpu_dm device.
1837  *
1838  * Initialize the &struct amdgpu_display_manager device. This involves calling
1839  * the initializers of each DM component, then populating the struct with them.
1840  *
1841  * Although the function implies hardware initialization, both hardware and
1842  * software are initialized here. Splitting them out to their relevant init
1843  * hooks is a future TODO item.
1844  *
1845  * Some notable things that are initialized here:
1846  *
1847  * - Display Core, both software and hardware
1848  * - DC modules that we need (freesync and color management)
1849  * - DRM software states
1850  * - Interrupt sources and handlers
1851  * - Vblank support
1852  * - Debug FS entries, if enabled
1853  */
1854 static int dm_hw_init(void *handle)
1855 {
1856         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1857         /* Create DAL display manager */
1858         amdgpu_dm_init(adev);
1859         amdgpu_dm_hpd_init(adev);
1860
1861         return 0;
1862 }
1863
1864 /**
1865  * dm_hw_fini() - Teardown DC device
1866  * @handle: The base driver device containing the amdgpu_dm device.
1867  *
1868  * Teardown components within &struct amdgpu_display_manager that require
1869  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1870  * were loaded. Also flush IRQ workqueues and disable them.
1871  */
1872 static int dm_hw_fini(void *handle)
1873 {
1874         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1875
1876         amdgpu_dm_hpd_fini(adev);
1877
1878         amdgpu_dm_irq_fini(adev);
1879         amdgpu_dm_fini(adev);
1880         return 0;
1881 }
1882
1883
1884 static int dm_enable_vblank(struct drm_crtc *crtc);
1885 static void dm_disable_vblank(struct drm_crtc *crtc);
1886
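/*
 * Toggle the pageflip and vblank interrupts for every CRTC that drives an
 * active stream in @state; used to quiesce and restore IRQs around a GPU
 * reset.
 */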
1887 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1888                                  struct dc_state *state, bool enable)
1889 {
1890         enum dc_irq_source irq_source;
1891         struct amdgpu_crtc *acrtc;
1892         int rc = -EBUSY;
1893         int i = 0;
1894
1895         for (i = 0; i < state->stream_count; i++) {
1896                 acrtc = get_crtc_by_otg_inst(
1897                                 adev, state->stream_status[i].primary_otg_inst);
1898
1899                 if (acrtc && state->stream_status[i].plane_count != 0) {
1900                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1901                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1902                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1903                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
1904                         if (rc)
1905                                 DRM_WARN("Failed to %s pflip interrupts\n",
1906                                          enable ? "enable" : "disable");
1907
1908                         if (enable) {
1909                                 rc = dm_enable_vblank(&acrtc->base);
1910                                 if (rc)
1911                                         DRM_WARN("Failed to enable vblank interrupts\n");
1912                         } else {
1913                                 dm_disable_vblank(&acrtc->base);
1914                         }
1915
1916                 }
1917         }
1918
1919 }
1920
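/*
 * Quiesce the hardware before a GPU reset by committing a context with zero
 * streams: copy the current state, strip every plane and stream from the
 * copy, revalidate it, and commit the now-empty state.
 */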
1921 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1922 {
1923         struct dc_state *context = NULL;
1924         enum dc_status res = DC_ERROR_UNEXPECTED;
1925         int i;
1926         struct dc_stream_state *del_streams[MAX_PIPES];
1927         int del_streams_count = 0;
1928
1929         memset(del_streams, 0, sizeof(del_streams));
1930
1931         context = dc_create_state(dc);
1932         if (context == NULL)
1933                 goto context_alloc_fail;
1934
1935         dc_resource_state_copy_construct_current(dc, context);
1936
1937         /* First remove from context all streams */
1938         for (i = 0; i < context->stream_count; i++) {
1939                 struct dc_stream_state *stream = context->streams[i];
1940
1941                 del_streams[del_streams_count++] = stream;
1942         }
1943
1944         /* Remove all planes for removed streams and then remove the streams */
1945         for (i = 0; i < del_streams_count; i++) {
1946                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1947                         res = DC_FAIL_DETACH_SURFACES;
1948                         goto fail;
1949                 }
1950
1951                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1952                 if (res != DC_OK)
1953                         goto fail;
1954         }
1955
1956
1957         res = dc_validate_global_state(dc, context, false);
1958
1959         if (res != DC_OK) {
1960                 DRM_ERROR("%s: resource validation failed, dc_status:%d\n", __func__, res);
1961                 goto fail;
1962         }
1963
1964         res = dc_commit_state(dc, context);
1965
1966 fail:
1967         dc_release_state(context);
1968
1969 context_alloc_fail:
1970         return res;
1971 }
1972
1973 static int dm_suspend(void *handle)
1974 {
1975         struct amdgpu_device *adev = handle;
1976         struct amdgpu_display_manager *dm = &adev->dm;
1977         int ret = 0;
1978
1979         if (amdgpu_in_reset(adev)) {
1980                 /* Held across the GPU reset; dm_resume() drops this lock. */
1981                 mutex_lock(&dm->dc_lock);
1982 #if defined(CONFIG_DRM_AMD_DC_DCN)
1983                 dc_allow_idle_optimizations(adev->dm.dc, false);
1984 #endif
1985
1986                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1987
1988                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1989
1990                 amdgpu_dm_commit_zero_streams(dm->dc);
1991
1992                 amdgpu_dm_irq_suspend(adev);
1993
1994                 return ret;
1995         }
1996
1997         WARN_ON(adev->dm.cached_state);
1998         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1999
2000         s3_handle_mst(adev_to_drm(adev), true);
2001
2002         amdgpu_dm_irq_suspend(adev);
2003
2004
2005         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2006
2007         return 0;
2008 }
2009
2010 static struct amdgpu_dm_connector *
2011 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2012                                              struct drm_crtc *crtc)
2013 {
2014         uint32_t i;
2015         struct drm_connector_state *new_con_state;
2016         struct drm_connector *connector;
2017         struct drm_crtc *crtc_from_state;
2018
2019         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2020                 crtc_from_state = new_con_state->crtc;
2021
2022                 if (crtc_from_state == crtc)
2023                         return to_amdgpu_dm_connector(connector);
2024         }
2025
2026         return NULL;
2027 }
2028
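/*
 * Emulate a successful link detection for a connector that is forced on
 * while nothing real is attached: derive the sink capabilities from the
 * connector signal type and attach an emulated sink, so a stream can still
 * be built without a live HPD/DDC path.
 */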
2029 static void emulated_link_detect(struct dc_link *link)
2030 {
2031         struct dc_sink_init_data sink_init_data = { 0 };
2032         struct display_sink_capability sink_caps = { 0 };
2033         enum dc_edid_status edid_status;
2034         struct dc_context *dc_ctx = link->ctx;
2035         struct dc_sink *sink = NULL;
2036         struct dc_sink *prev_sink = NULL;
2037
2038         link->type = dc_connection_none;
2039         prev_sink = link->local_sink;
2040
2041         if (prev_sink)
2042                 dc_sink_release(prev_sink);
2043
2044         switch (link->connector_signal) {
2045         case SIGNAL_TYPE_HDMI_TYPE_A: {
2046                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2047                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2048                 break;
2049         }
2050
2051         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2052                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2053                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2054                 break;
2055         }
2056
2057         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2058                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2059                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2060                 break;
2061         }
2062
2063         case SIGNAL_TYPE_LVDS: {
2064                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2065                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2066                 break;
2067         }
2068
2069         case SIGNAL_TYPE_EDP: {
2070                 sink_caps.transaction_type =
2071                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2072                 sink_caps.signal = SIGNAL_TYPE_EDP;
2073                 break;
2074         }
2075
2076         case SIGNAL_TYPE_DISPLAY_PORT: {
2077                 sink_caps.transaction_type =
2078                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2079                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2080                 break;
2081         }
2082
2083         default:
2084                 DC_ERROR("Invalid connector type! signal:%d\n",
2085                         link->connector_signal);
2086                 return;
2087         }
2088
2089         sink_init_data.link = link;
2090         sink_init_data.sink_signal = sink_caps.signal;
2091
2092         sink = dc_sink_create(&sink_init_data);
2093         if (!sink) {
2094                 DC_ERROR("Failed to create sink!\n");
2095                 return;
2096         }
2097
2098         /* dc_sink_create returns a new reference */
2099         link->local_sink = sink;
2100
2101         edid_status = dm_helpers_read_local_edid(
2102                         link->ctx,
2103                         link,
2104                         sink);
2105
2106         if (edid_status != EDID_OK)
2107                 DC_ERROR("Failed to read EDID\n");
2108
2109 }
2110
2111 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2112                                      struct amdgpu_display_manager *dm)
2113 {
2114         struct {
2115                 struct dc_surface_update surface_updates[MAX_SURFACES];
2116                 struct dc_plane_info plane_infos[MAX_SURFACES];
2117                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2118                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2119                 struct dc_stream_update stream_update;
2120         } *bundle;
2121         int k, m;
2122
2123         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2124
2125         if (!bundle) {
2126                 dm_error("Failed to allocate update bundle\n");
2127                 goto cleanup;
2128         }
2129
2130         for (k = 0; k < dc_state->stream_count; k++) {
2131                 bundle->stream_update.stream = dc_state->streams[k];
2132
2133                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2134                         bundle->surface_updates[m].surface =
2135                                 dc_state->stream_status[k].plane_states[m];
2136                         bundle->surface_updates[m].surface->force_full_update =
2137                                 true;
2138                 }
2139                 dc_commit_updates_for_stream(
2140                         dm->dc, bundle->surface_updates,
2141                         dc_state->stream_status[k].plane_count,
2142                         dc_state->streams[k], &bundle->stream_update, dc_state);
2143         }
2144
2145 cleanup:
2146         kfree(bundle);
2147
2148         return;
2149 }
2150
2151 static void dm_set_dpms_off(struct dc_link *link)
2152 {
2153         struct dc_stream_state *stream_state;
2154         struct amdgpu_dm_connector *aconnector = link->priv;
2155         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2156         struct dc_stream_update stream_update;
2157         bool dpms_off = true;
2158
2159         memset(&stream_update, 0, sizeof(stream_update));
2160         stream_update.dpms_off = &dpms_off;
2161
2162         mutex_lock(&adev->dm.dc_lock);
2163         stream_state = dc_stream_find_from_link(link);
2164
2165         if (stream_state == NULL) {
2166                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2167                 mutex_unlock(&adev->dm.dc_lock);
2168                 return;
2169         }
2170
2171         stream_update.stream = stream_state;
2172         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2173                                      stream_state, &stream_update,
2174                                      stream_state->ctx->dc->current_state);
2175         mutex_unlock(&adev->dm.dc_lock);
2176 }
2177
2178 static int dm_resume(void *handle)
2179 {
2180         struct amdgpu_device *adev = handle;
2181         struct drm_device *ddev = adev_to_drm(adev);
2182         struct amdgpu_display_manager *dm = &adev->dm;
2183         struct amdgpu_dm_connector *aconnector;
2184         struct drm_connector *connector;
2185         struct drm_connector_list_iter iter;
2186         struct drm_crtc *crtc;
2187         struct drm_crtc_state *new_crtc_state;
2188         struct dm_crtc_state *dm_new_crtc_state;
2189         struct drm_plane *plane;
2190         struct drm_plane_state *new_plane_state;
2191         struct dm_plane_state *dm_new_plane_state;
2192         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2193         enum dc_connection_type new_connection_type = dc_connection_none;
2194         struct dc_state *dc_state;
2195         int i, r, j;
2196
2197         if (amdgpu_in_reset(adev)) {
2198                 dc_state = dm->cached_dc_state;
2199
2200                 r = dm_dmub_hw_init(adev);
2201                 if (r)
2202                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2203
2204                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2205                 dc_resume(dm->dc);
2206
2207                 amdgpu_dm_irq_resume_early(adev);
2208
2209                 for (i = 0; i < dc_state->stream_count; i++) {
2210                         dc_state->streams[i]->mode_changed = true;
2211                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2212                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2213                                         = 0xffffffff;
2214                         }
2215                 }
2216
2217                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2218
2219                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2220
2221                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2222
2223                 dc_release_state(dm->cached_dc_state);
2224                 dm->cached_dc_state = NULL;
2225
2226                 amdgpu_dm_irq_resume_late(adev);
2227
2228                 /* Drop the dc_lock acquired in dm_suspend() during reset */
2229                 mutex_unlock(&dm->dc_lock);
2230                 return 0;
2231         }
2232         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2233         dc_release_state(dm_state->context);
2234         dm_state->context = dc_create_state(dm->dc);
2235         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2236         dc_resource_state_construct(dm->dc, dm_state->context);
2237
2238         /* Before powering on DC we need to re-initialize DMUB. */
2239         r = dm_dmub_hw_init(adev);
2240         if (r)
2241                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2242
2243         /* power on hardware */
2244         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2245
2246         /* program HPD filter */
2247         dc_resume(dm->dc);
2248
2249         /*
2250          * Enable the HPD Rx IRQ early; this must happen before the mode is
2251          * set, since short-pulse interrupts are used for MST.
2252          */
2253         amdgpu_dm_irq_resume_early(adev);
2254
2255         /* On resume we need to rewrite the MSTM control bits to enable MST */
2256         s3_handle_mst(ddev, false);
2257
2258         /* Do detection */
2259         drm_connector_list_iter_begin(ddev, &iter);
2260         drm_for_each_connector_iter(connector, &iter) {
2261                 aconnector = to_amdgpu_dm_connector(connector);
2262
2263                 /*
2264                  * This is the case when traversing through already created
2265                  * MST connectors; they should be skipped.
2266                  */
2267                 if (aconnector->mst_port)
2268                         continue;
2269
2270                 mutex_lock(&aconnector->hpd_lock);
2271                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2272                         DRM_ERROR("KMS: Failed to detect connector\n");
2273
2274                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2275                         emulated_link_detect(aconnector->dc_link);
2276                 else
2277                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2278
2279                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2280                         aconnector->fake_enable = false;
2281
2282                 if (aconnector->dc_sink)
2283                         dc_sink_release(aconnector->dc_sink);
2284                 aconnector->dc_sink = NULL;
2285                 amdgpu_dm_update_connector_after_detect(aconnector);
2286                 mutex_unlock(&aconnector->hpd_lock);
2287         }
2288         drm_connector_list_iter_end(&iter);
2289
2290         /* Force mode set in atomic commit */
2291         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2292                 new_crtc_state->active_changed = true;
2293
2294         /*
2295          * atomic_check is expected to create the dc states. We need to release
2296          * them here, since they were duplicated as part of the suspend
2297          * procedure.
2298          */
2299         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2300                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2301                 if (dm_new_crtc_state->stream) {
2302                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2303                         dc_stream_release(dm_new_crtc_state->stream);
2304                         dm_new_crtc_state->stream = NULL;
2305                 }
2306         }
2307
2308         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2309                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2310                 if (dm_new_plane_state->dc_state) {
2311                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2312                         dc_plane_state_release(dm_new_plane_state->dc_state);
2313                         dm_new_plane_state->dc_state = NULL;
2314                 }
2315         }
2316
2317         drm_atomic_helper_resume(ddev, dm->cached_state);
2318
2319         dm->cached_state = NULL;
2320
2321         amdgpu_dm_irq_resume_late(adev);
2322
2323         amdgpu_dm_smu_write_watermarks_table(adev);
2324
2325         return 0;
2326 }
2327
2328 /**
2329  * DOC: DM Lifecycle
2330  *
2331  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2332  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2333  * the base driver's device list to be initialized and torn down accordingly.
2334  *
2335  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2336  */
2337
2338 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2339         .name = "dm",
2340         .early_init = dm_early_init,
2341         .late_init = dm_late_init,
2342         .sw_init = dm_sw_init,
2343         .sw_fini = dm_sw_fini,
2344         .hw_init = dm_hw_init,
2345         .hw_fini = dm_hw_fini,
2346         .suspend = dm_suspend,
2347         .resume = dm_resume,
2348         .is_idle = dm_is_idle,
2349         .wait_for_idle = dm_wait_for_idle,
2350         .check_soft_reset = dm_check_soft_reset,
2351         .soft_reset = dm_soft_reset,
2352         .set_clockgating_state = dm_set_clockgating_state,
2353         .set_powergating_state = dm_set_powergating_state,
2354 };
2355
2356 const struct amdgpu_ip_block_version dm_ip_block =
2357 {
2358         .type = AMD_IP_BLOCK_TYPE_DCE,
2359         .major = 1,
2360         .minor = 0,
2361         .rev = 0,
2362         .funcs = &amdgpu_dm_funcs,
2363 };
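/*
 * Illustrative sketch (not part of this file): the per-ASIC init code, e.g.
 * soc15.c or nv.c, registers this block with the base driver roughly as:
 *
 *      if (amdgpu_device_has_dc_support(adev))
 *              amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * amdgpu_device_ip_block_add() appends the block to the device's IP list,
 * after which the amdgpu_dm_funcs hooks above are invoked in IP-block order.
 */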
2364
2365
2366 /**
2367  * DOC: atomic
2368  *
2369  * *WIP*
2370  */
2371
2372 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2373         .fb_create = amdgpu_display_user_framebuffer_create,
2374         .get_format_info = amd_get_format_info,
2375         .output_poll_changed = drm_fb_helper_output_poll_changed,
2376         .atomic_check = amdgpu_dm_atomic_check,
2377         .atomic_commit = drm_atomic_helper_commit,
2378 };
2379
2380 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2381         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2382 };
2383
2384 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2385 {
2386         u32 max_cll, min_cll, max, min, q, r;
2387         struct amdgpu_dm_backlight_caps *caps;
2388         struct amdgpu_display_manager *dm;
2389         struct drm_connector *conn_base;
2390         struct amdgpu_device *adev;
2391         struct dc_link *link = NULL;
2392         static const u8 pre_computed_values[] = {
2393                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2394                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2395
2396         if (!aconnector || !aconnector->dc_link)
2397                 return;
2398
2399         link = aconnector->dc_link;
2400         if (link->connector_signal != SIGNAL_TYPE_EDP)
2401                 return;
2402
2403         conn_base = &aconnector->base;
2404         adev = drm_to_adev(conn_base->dev);
2405         dm = &adev->dm;
2406         caps = &dm->backlight_caps;
2407         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2408         caps->aux_support = false;
2409         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2410         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2411
2412         if (caps->ext_caps->bits.oled == 1 ||
2413             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2414             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2415                 caps->aux_support = true;
2416
2417         if (amdgpu_backlight == 0)
2418                 caps->aux_support = false;
2419         else if (amdgpu_backlight == 1)
2420                 caps->aux_support = true;
2421
2422         /* From the specification (CTA-861-G), the maximum luminance is
2423          * calculated as:
2424          *      Luminance = 50 * 2**(CV/32)
2425          * where CV is a one-byte value.
2426          * Evaluating this expression directly would need floating-point
2427          * precision; to avoid that, we exploit the fact that CV is divided
2428          * by a constant. By Euclid's division algorithm, CV can be written
2429          * as CV = 32*q + r. Substituting into the luminance expression
2430          * gives 50*(2**q)*(2**(r/32)), so only the values of 2**(r/32)
2431          * need to be pre-computed. The pre-computed table was generated
2432          * with the following Ruby line:
2433          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2434          * The results of the above expression can be verified against
2435          * pre_computed_values.
2436          */
2437         q = max_cll >> 5;
2438         r = max_cll % 32;
2439         max = (1 << q) * pre_computed_values[r];
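        /*
         * Worked example: max_cll = 100 gives q = 3 and r = 4, so
         * max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440, which
         * tracks the exact 50 * 2**(100/32) ~= 436 up to the rounding
         * baked into the table.
         */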
2440
2441         /* min luminance: maxLum * (CV/255)^2 / 100 */
2442         q = DIV_ROUND_CLOSEST(min_cll, 255);
2443         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2444
2445         caps->aux_max_input_signal = max;
2446         caps->aux_min_input_signal = min;
2447 }
2448
2449 void amdgpu_dm_update_connector_after_detect(
2450                 struct amdgpu_dm_connector *aconnector)
2451 {
2452         struct drm_connector *connector = &aconnector->base;
2453         struct drm_device *dev = connector->dev;
2454         struct dc_sink *sink;
2455
2456         /* MST handled by drm_mst framework */
2457         if (aconnector->mst_mgr.mst_state)
2458                 return;
2459
2460         sink = aconnector->dc_link->local_sink;
2461         if (sink)
2462                 dc_sink_retain(sink);
2463
2464         /*
2465          * An EDID-managed connector gets its first update in the mode_valid
2466          * hook only; the sink is then set to either a fake or a physical
2467          * sink depending on link status. Skip if already done during boot.
2468          */
2469         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2470                         && aconnector->dc_em_sink) {
2471
2472                 /*
2473                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2474                  * fake a stream, because connector->sink is set to NULL on resume.
2475                  */
2476                 mutex_lock(&dev->mode_config.mutex);
2477
2478                 if (sink) {
2479                         if (aconnector->dc_sink) {
2480                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2481                                 /*
2482                                  * The retain/release below bump the sink refcount:
2483                                  * after disconnect the link no longer points to the
2484                                  * sink, so the next crtc-to-connector reshuffle by
2485                                  * the UMD would otherwise release the dc_sink early.
2486                                  */
2487                                 dc_sink_release(aconnector->dc_sink);
2488                         }
2489                         aconnector->dc_sink = sink;
2490                         dc_sink_retain(aconnector->dc_sink);
2491                         amdgpu_dm_update_freesync_caps(connector,
2492                                         aconnector->edid);
2493                 } else {
2494                         amdgpu_dm_update_freesync_caps(connector, NULL);
2495                         if (!aconnector->dc_sink) {
2496                                 aconnector->dc_sink = aconnector->dc_em_sink;
2497                                 dc_sink_retain(aconnector->dc_sink);
2498                         }
2499                 }
2500
2501                 mutex_unlock(&dev->mode_config.mutex);
2502
2503                 if (sink)
2504                         dc_sink_release(sink);
2505                 return;
2506         }
2507
2508         /*
2509          * TODO: temporary guard to look for proper fix
2510          * if this sink is MST sink, we should not do anything
2511          */
2512         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2513                 dc_sink_release(sink);
2514                 return;
2515         }
2516
2517         if (aconnector->dc_sink == sink) {
2518                 /*
2519                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2520                  * Do nothing!!
2521                  */
2522                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2523                                 aconnector->connector_id);
2524                 if (sink)
2525                         dc_sink_release(sink);
2526                 return;
2527         }
2528
2529         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2530                 aconnector->connector_id, aconnector->dc_sink, sink);
2531
2532         mutex_lock(&dev->mode_config.mutex);
2533
2534         /*
2535          * 1. Update status of the drm connector
2536          * 2. Send an event and let userspace tell us what to do
2537          */
2538         if (sink) {
2539                 /*
2540                  * TODO: check if we still need the S3 mode update workaround.
2541                  * If yes, put it here.
2542                  */
2543                 if (aconnector->dc_sink) {
2544                         amdgpu_dm_update_freesync_caps(connector, NULL);
2545                         dc_sink_release(aconnector->dc_sink);
2546                 }
2547
2548                 aconnector->dc_sink = sink;
2549                 dc_sink_retain(aconnector->dc_sink);
2550                 if (sink->dc_edid.length == 0) {
2551                         aconnector->edid = NULL;
2552                         if (aconnector->dc_link->aux_mode) {
2553                                 drm_dp_cec_unset_edid(
2554                                         &aconnector->dm_dp_aux.aux);
2555                         }
2556                 } else {
2557                         aconnector->edid =
2558                                 (struct edid *)sink->dc_edid.raw_edid;
2559
2560                         drm_connector_update_edid_property(connector,
2561                                                            aconnector->edid);
2562                         if (aconnector->dc_link->aux_mode)
2563                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2564                                                     aconnector->edid);
2565                 }
2566
2567                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2568                 update_connector_ext_caps(aconnector);
2569         } else {
2570                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2571                 amdgpu_dm_update_freesync_caps(connector, NULL);
2572                 drm_connector_update_edid_property(connector, NULL);
2573                 aconnector->num_modes = 0;
2574                 dc_sink_release(aconnector->dc_sink);
2575                 aconnector->dc_sink = NULL;
2576                 aconnector->edid = NULL;
2577 #ifdef CONFIG_DRM_AMD_DC_HDCP
2578                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2579                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2580                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2581 #endif
2582         }
2583
2584         mutex_unlock(&dev->mode_config.mutex);
2585
2586         update_subconnector_property(aconnector);
2587
2588         if (sink)
2589                 dc_sink_release(sink);
2590 }
2591
2592 static void handle_hpd_irq(void *param)
2593 {
2594         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2595         struct drm_connector *connector = &aconnector->base;
2596         struct drm_device *dev = connector->dev;
2597         enum dc_connection_type new_connection_type = dc_connection_none;
2598         struct amdgpu_device *adev = drm_to_adev(dev);
2599 #ifdef CONFIG_DRM_AMD_DC_HDCP
2600         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2601 #endif
2602
2603         if (adev->dm.disable_hpd_irq)
2604                 return;
2605
2606         /*
2607          * On failure, or for MST, there is no need to update the connector
2608          * status or notify the OS, since MST handles this in its own context.
2609          */
2610         mutex_lock(&aconnector->hpd_lock);
2611
2612 #ifdef CONFIG_DRM_AMD_DC_HDCP
2613         if (adev->dm.hdcp_workqueue) {
2614                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2615                 dm_con_state->update_hdcp = true;
2616         }
2617 #endif
2618         if (aconnector->fake_enable)
2619                 aconnector->fake_enable = false;
2620
2621         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2622                 DRM_ERROR("KMS: Failed to detect connector\n");
2623
2624         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2625                 emulated_link_detect(aconnector->dc_link);
2626
2627
2628                 drm_modeset_lock_all(dev);
2629                 dm_restore_drm_connector_state(dev, connector);
2630                 drm_modeset_unlock_all(dev);
2631
2632                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2633                         drm_kms_helper_hotplug_event(dev);
2634
2635         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2636                 if (new_connection_type == dc_connection_none &&
2637                     aconnector->dc_link->type == dc_connection_none)
2638                         dm_set_dpms_off(aconnector->dc_link);
2639
2640                 amdgpu_dm_update_connector_after_detect(aconnector);
2641
2642                 drm_modeset_lock_all(dev);
2643                 dm_restore_drm_connector_state(dev, connector);
2644                 drm_modeset_unlock_all(dev);
2645
2646                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2647                         drm_kms_helper_hotplug_event(dev);
2648         }
2649         mutex_unlock(&aconnector->hpd_lock);
2650
2651 }
2652
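/*
 * Service DP short-pulse (ESI) interrupts on an MST link: read the ESI
 * bytes, hand them to the MST topology manager, ACK the handled bits back
 * to the sink, then re-read until no new IRQ is flagged (capped at 30
 * passes).
 */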
2653 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2654 {
2655         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2656         uint8_t dret;
2657         bool new_irq_handled = false;
2658         int dpcd_addr;
2659         int dpcd_bytes_to_read;
2660
2661         const int max_process_count = 30;
2662         int process_count = 0;
2663
2664         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2665
2666         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2667                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2668                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2669                 dpcd_addr = DP_SINK_COUNT;
2670         } else {
2671                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2672                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2673                 dpcd_addr = DP_SINK_COUNT_ESI;
2674         }
2675
2676         dret = drm_dp_dpcd_read(
2677                 &aconnector->dm_dp_aux.aux,
2678                 dpcd_addr,
2679                 esi,
2680                 dpcd_bytes_to_read);
2681
2682         while (dret == dpcd_bytes_to_read &&
2683                 process_count < max_process_count) {
2684                 uint8_t retry;
2685                 dret = 0;
2686
2687                 process_count++;
2688
2689                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2690                 /* handle HPD short pulse irq */
2691                 if (aconnector->mst_mgr.mst_state)
2692                         drm_dp_mst_hpd_irq(
2693                                 &aconnector->mst_mgr,
2694                                 esi,
2695                                 &new_irq_handled);
2696
2697                 if (new_irq_handled) {
2698                         /* ACK at DPCD to notify downstream */
2699                         const int ack_dpcd_bytes_to_write =
2700                                 dpcd_bytes_to_read - 1;
2701
2702                         for (retry = 0; retry < 3; retry++) {
2703                                 uint8_t wret;
2704
2705                                 wret = drm_dp_dpcd_write(
2706                                         &aconnector->dm_dp_aux.aux,
2707                                         dpcd_addr + 1,
2708                                         &esi[1],
2709                                         ack_dpcd_bytes_to_write);
2710                                 if (wret == ack_dpcd_bytes_to_write)
2711                                         break;
2712                         }
2713
2714                         /* check if there is new irq to be handled */
2715                         dret = drm_dp_dpcd_read(
2716                                 &aconnector->dm_dp_aux.aux,
2717                                 dpcd_addr,
2718                                 esi,
2719                                 dpcd_bytes_to_read);
2720
2721                         new_irq_handled = false;
2722                 } else {
2723                         break;
2724                 }
2725         }
2726
2727         if (process_count == max_process_count)
2728                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2729 }
2730
2731 static void handle_hpd_rx_irq(void *param)
2732 {
2733         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2734         struct drm_connector *connector = &aconnector->base;
2735         struct drm_device *dev = connector->dev;
2736         struct dc_link *dc_link = aconnector->dc_link;
2737         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2738         bool result = false;
2739         enum dc_connection_type new_connection_type = dc_connection_none;
2740         struct amdgpu_device *adev = drm_to_adev(dev);
2741         union hpd_irq_data hpd_irq_data;
2742
2743         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2744
2745         if (adev->dm.disable_hpd_irq)
2746                 return;
2747
2748
2749         /*
2750          * TODO: Temporary mutex to keep the HPD interrupt from hitting a
2751          * GPIO conflict; once an i2c helper is implemented, this mutex
2752          * should be retired.
2753          */
2754         mutex_lock(&aconnector->hpd_lock);
2755
2756         read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2757
2758         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2759                 (dc_link->type == dc_connection_mst_branch)) {
2760                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2761                         result = true;
2762                         dm_handle_hpd_rx_irq(aconnector);
2763                         goto out;
2764                 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2765                         result = false;
2766                         dm_handle_hpd_rx_irq(aconnector);
2767                         goto out;
2768                 }
2769         }
2770
2771         if (!amdgpu_in_reset(adev)) {
2772                 mutex_lock(&adev->dm.dc_lock);
2773 #ifdef CONFIG_DRM_AMD_DC_HDCP
2774                 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2775 #else
2776                 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2777 #endif
2778                 mutex_unlock(&adev->dm.dc_lock);
2779         }
2780
2781 out:
2782         if (result && !is_mst_root_connector) {
2783                 /* Downstream Port status changed. */
2784                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2785                         DRM_ERROR("KMS: Failed to detect connector\n");
2786
2787                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2788                         emulated_link_detect(dc_link);
2789
2790                         if (aconnector->fake_enable)
2791                                 aconnector->fake_enable = false;
2792
2793                         amdgpu_dm_update_connector_after_detect(aconnector);
2794
2795
2796                         drm_modeset_lock_all(dev);
2797                         dm_restore_drm_connector_state(dev, connector);
2798                         drm_modeset_unlock_all(dev);
2799
2800                         drm_kms_helper_hotplug_event(dev);
2801                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2802
2803                         if (aconnector->fake_enable)
2804                                 aconnector->fake_enable = false;
2805
2806                         amdgpu_dm_update_connector_after_detect(aconnector);
2807
2808
2809                         drm_modeset_lock_all(dev);
2810                         dm_restore_drm_connector_state(dev, connector);
2811                         drm_modeset_unlock_all(dev);
2812
2813                         drm_kms_helper_hotplug_event(dev);
2814                 }
2815         }
2816 #ifdef CONFIG_DRM_AMD_DC_HDCP
2817         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2818                 if (adev->dm.hdcp_workqueue)
2819                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2820         }
2821 #endif
2822
2823         if (dc_link->type != dc_connection_mst_branch)
2824                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2825
2826         mutex_unlock(&aconnector->hpd_lock);
2827 }
2828
2829 static void register_hpd_handlers(struct amdgpu_device *adev)
2830 {
2831         struct drm_device *dev = adev_to_drm(adev);
2832         struct drm_connector *connector;
2833         struct amdgpu_dm_connector *aconnector;
2834         const struct dc_link *dc_link;
2835         struct dc_interrupt_params int_params = {0};
2836
2837         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2838         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2839
2840         list_for_each_entry(connector,
2841                         &dev->mode_config.connector_list, head) {
2842
2843                 aconnector = to_amdgpu_dm_connector(connector);
2844                 dc_link = aconnector->dc_link;
2845
2846                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2847                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2848                         int_params.irq_source = dc_link->irq_source_hpd;
2849
2850                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2851                                         handle_hpd_irq,
2852                                         (void *) aconnector);
2853                 }
2854
2855                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2856
2857                         /* Also register for DP short pulse (hpd_rx). */
2858                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2859                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2860
2861                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2862                                         handle_hpd_rx_irq,
2863                                         (void *) aconnector);
2864                 }
2865         }
2866 }
2867
2868 #if defined(CONFIG_DRM_AMD_DC_SI)
2869 /* Register IRQ sources and initialize IRQ callbacks */
2870 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2871 {
2872         struct dc *dc = adev->dm.dc;
2873         struct common_irq_params *c_irq_params;
2874         struct dc_interrupt_params int_params = {0};
2875         int r;
2876         int i;
2877         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2878
2879         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2880         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2881
2882         /*
2883          * Actions of amdgpu_irq_add_id():
2884          * 1. Register a set() function with base driver.
2885          *    Base driver will call set() function to enable/disable an
2886          *    interrupt in DC hardware.
2887          * 2. Register amdgpu_dm_irq_handler().
2888          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2889          *    coming from DC hardware.
2890          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2891          *    for acknowledging and handling. */
2892
2893         /* Use VBLANK interrupt */
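        /*
         * Sketch of the srcid mapping (an assumption based on the legacy
         * DCE 6 interrupt tables): the D1..D6 CRTC vblank sources are
         * numbered from srcid 1, so the zero-based CRTC index i is
         * translated as i + 1 below.
         */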
2894         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2895                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2896                 if (r) {
2897                         DRM_ERROR("Failed to add crtc irq id!\n");
2898                         return r;
2899                 }
2900
2901                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2902                 int_params.irq_source =
2903                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2904
2905                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2906
2907                 c_irq_params->adev = adev;
2908                 c_irq_params->irq_src = int_params.irq_source;
2909
2910                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2911                                 dm_crtc_high_irq, c_irq_params);
2912         }
2913
2914         /* Use GRPH_PFLIP interrupt */
2915         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2916                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2917                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2918                 if (r) {
2919                         DRM_ERROR("Failed to add page flip irq id!\n");
2920                         return r;
2921                 }
2922
2923                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2924                 int_params.irq_source =
2925                         dc_interrupt_to_irq_source(dc, i, 0);
2926
2927                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2928
2929                 c_irq_params->adev = adev;
2930                 c_irq_params->irq_src = int_params.irq_source;
2931
2932                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2933                                 dm_pflip_high_irq, c_irq_params);
2934
2935         }
2936
2937         /* HPD */
2938         r = amdgpu_irq_add_id(adev, client_id,
2939                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2940         if (r) {
2941                 DRM_ERROR("Failed to add hpd irq id!\n");
2942                 return r;
2943         }
2944
2945         register_hpd_handlers(adev);
2946
2947         return 0;
2948 }
2949 #endif
2950
2951 /* Register IRQ sources and initialize IRQ callbacks */
2952 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2953 {
2954         struct dc *dc = adev->dm.dc;
2955         struct common_irq_params *c_irq_params;
2956         struct dc_interrupt_params int_params = {0};
2957         int r;
2958         int i;
2959         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2960
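        /*
         * Pre-SOC15 parts share the single legacy IH client id; from Vega
         * onwards each interrupt ring entry is tagged with a client id, and
         * display interrupts arrive under SOC15_IH_CLIENTID_DCE instead.
         */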
2961         if (adev->asic_type >= CHIP_VEGA10)
2962                 client_id = SOC15_IH_CLIENTID_DCE;
2963
2964         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2965         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2966
2967         /*
2968          * Actions of amdgpu_irq_add_id():
2969          * 1. Register a set() function with base driver.
2970          *    Base driver will call set() function to enable/disable an
2971          *    interrupt in DC hardware.
2972          * 2. Register amdgpu_dm_irq_handler().
2973          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2974          *    coming from DC hardware.
2975          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2976          *    for acknowledging and handling. */
2977
2978         /* Use VBLANK interrupt */
2979         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2980                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2981                 if (r) {
2982                         DRM_ERROR("Failed to add crtc irq id!\n");
2983                         return r;
2984                 }
2985
2986                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2987                 int_params.irq_source =
2988                         dc_interrupt_to_irq_source(dc, i, 0);
2989
2990                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2991
2992                 c_irq_params->adev = adev;
2993                 c_irq_params->irq_src = int_params.irq_source;
2994
2995                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2996                                 dm_crtc_high_irq, c_irq_params);
2997         }
2998
2999         /* Use VUPDATE interrupt */
3000         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3001                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3002                 if (r) {
3003                         DRM_ERROR("Failed to add vupdate irq id!\n");
3004                         return r;
3005                 }
3006
3007                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3008                 int_params.irq_source =
3009                         dc_interrupt_to_irq_source(dc, i, 0);
3010
3011                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3012
3013                 c_irq_params->adev = adev;
3014                 c_irq_params->irq_src = int_params.irq_source;
3015
3016                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3017                                 dm_vupdate_high_irq, c_irq_params);
3018         }
3019
3020         /* Use GRPH_PFLIP interrupt */
3021         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3022                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3023                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3024                 if (r) {
3025                         DRM_ERROR("Failed to add page flip irq id!\n");
3026                         return r;
3027                 }
3028
3029                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3030                 int_params.irq_source =
3031                         dc_interrupt_to_irq_source(dc, i, 0);
3032
3033                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3034
3035                 c_irq_params->adev = adev;
3036                 c_irq_params->irq_src = int_params.irq_source;
3037
3038                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3039                                 dm_pflip_high_irq, c_irq_params);
3040
3041         }
3042
3043         /* HPD */
3044         r = amdgpu_irq_add_id(adev, client_id,
3045                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3046         if (r) {
3047                 DRM_ERROR("Failed to add hpd irq id!\n");
3048                 return r;
3049         }
3050
3051         register_hpd_handlers(adev);
3052
3053         return 0;
3054 }
3055
3056 #if defined(CONFIG_DRM_AMD_DC_DCN)
3057 /* Register IRQ sources and initialize IRQ callbacks */
3058 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3059 {
3060         struct dc *dc = adev->dm.dc;
3061         struct common_irq_params *c_irq_params;
3062         struct dc_interrupt_params int_params = {0};
3063         int r;
3064         int i;
3065 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3066         static const unsigned int vrtl_int_srcid[] = {
3067                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3068                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3069                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3070                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3071                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3072                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3073         };
3074 #endif
3075
3076         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3077         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3078
3079         /*
3080          * Actions of amdgpu_irq_add_id():
3081          * 1. Register a set() function with base driver.
3082          *    Base driver will call set() function to enable/disable an
3083          *    interrupt in DC hardware.
3084          * 2. Register amdgpu_dm_irq_handler().
3085          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3086          *    coming from DC hardware.
3087          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3088          *    for acknowledging and handling.
3089          */
3090
3091         /* Use VSTARTUP interrupt */
3092         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3093                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3094                         i++) {
3095                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3096
3097                 if (r) {
3098                         DRM_ERROR("Failed to add crtc irq id!\n");
3099                         return r;
3100                 }
3101
3102                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3103                 int_params.irq_source =
3104                         dc_interrupt_to_irq_source(dc, i, 0);
3105
3106                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3107
3108                 c_irq_params->adev = adev;
3109                 c_irq_params->irq_src = int_params.irq_source;
3110
3111                 amdgpu_dm_irq_register_interrupt(
3112                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3113         }
3114
3115         /* Use otg vertical line interrupt */
3116 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3117         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3118                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3119                                 vrtl_int_srcid[i], &adev->vline0_irq);
3120
3121                 if (r) {
3122                         DRM_ERROR("Failed to add vline0 irq id!\n");
3123                         return r;
3124                 }
3125
3126                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3127                 int_params.irq_source =
3128                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3129
3130                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3131                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3132                         break;
3133                 }
3134
3135                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3136                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3137
3138                 c_irq_params->adev = adev;
3139                 c_irq_params->irq_src = int_params.irq_source;
3140
3141                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3142                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3143         }
3144 #endif
3145
3146         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3147          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3148          * to trigger at end of each vblank, regardless of state of the lock,
3149          * matching DCE behaviour.
3150          */
3151         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3152              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3153              i++) {
3154                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3155
3156                 if (r) {
3157                         DRM_ERROR("Failed to add vupdate irq id!\n");
3158                         return r;
3159                 }
3160
3161                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3162                 int_params.irq_source =
3163                         dc_interrupt_to_irq_source(dc, i, 0);
3164
3165                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3166
3167                 c_irq_params->adev = adev;
3168                 c_irq_params->irq_src = int_params.irq_source;
3169
3170                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3171                                 dm_vupdate_high_irq, c_irq_params);
3172         }
3173
3174         /* Use GRPH_PFLIP interrupt */
3175         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3176                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3177                         i++) {
3178                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3179                 if (r) {
3180                         DRM_ERROR("Failed to add page flip irq id!\n");
3181                         return r;
3182                 }
3183
3184                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3185                 int_params.irq_source =
3186                         dc_interrupt_to_irq_source(dc, i, 0);
3187
3188                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3189
3190                 c_irq_params->adev = adev;
3191                 c_irq_params->irq_src = int_params.irq_source;
3192
3193                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3194                                 dm_pflip_high_irq, c_irq_params);
3195
3196         }
3197
3198         /* HPD */
3199         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3200                         &adev->hpd_irq);
3201         if (r) {
3202                 DRM_ERROR("Failed to add hpd irq id!\n");
3203                 return r;
3204         }
3205
3206         register_hpd_handlers(adev);
3207
3208         return 0;
3209 }
3210 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3211 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3212 {
3213         struct dc *dc = adev->dm.dc;
3214         struct common_irq_params *c_irq_params;
3215         struct dc_interrupt_params int_params = {0};
3216         int r, i;
3217
3218         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3219         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3220
3221         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3222                         &adev->dmub_outbox_irq);
3223         if (r) {
3224                 DRM_ERROR("Failed to add outbox irq id!\n");
3225                 return r;
3226         }
3227
3228         if (dc->ctx->dmub_srv) {
3229                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3230                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3231                 int_params.irq_source =
3232                 dc_interrupt_to_irq_source(dc, i, 0);
3233
3234                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3235
3236                 c_irq_params->adev = adev;
3237                 c_irq_params->irq_src = int_params.irq_source;
3238
3239                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3240                                 dm_dmub_outbox1_low_irq, c_irq_params);
3241         }
3242
3243         return 0;
3244 }
3245 #endif
3246
3247 /*
3248  * Acquires the lock for the atomic state object and returns
3249  * the new atomic state.
3250  *
3251  * This should only be called during atomic check.
3252  */
3253 static int dm_atomic_get_state(struct drm_atomic_state *state,
3254                                struct dm_atomic_state **dm_state)
3255 {
3256         struct drm_device *dev = state->dev;
3257         struct amdgpu_device *adev = drm_to_adev(dev);
3258         struct amdgpu_display_manager *dm = &adev->dm;
3259         struct drm_private_state *priv_state;
3260
3261         if (*dm_state)
3262                 return 0;
3263
3264         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3265         if (IS_ERR(priv_state))
3266                 return PTR_ERR(priv_state);
3267
3268         *dm_state = to_dm_atomic_state(priv_state);
3269
3270         return 0;
3271 }
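
/*
 * Illustrative call pattern (a sketch, not a verbatim caller): atomic
 * check code keeps a lazily-acquired pointer so the private object lock
 * is only taken once the DC context is actually needed:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret;
 *
 *	ret = dm_atomic_get_state(state, &dm_state);
 *	if (ret)
 *		return ret;
 *
 * after which dm_state->context refers to the global DC state for this
 * commit.
 */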
3272
3273 static struct dm_atomic_state *
3274 dm_atomic_get_new_state(struct drm_atomic_state *state)
3275 {
3276         struct drm_device *dev = state->dev;
3277         struct amdgpu_device *adev = drm_to_adev(dev);
3278         struct amdgpu_display_manager *dm = &adev->dm;
3279         struct drm_private_obj *obj;
3280         struct drm_private_state *new_obj_state;
3281         int i;
3282
3283         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3284                 if (obj->funcs == dm->atomic_obj.funcs)
3285                         return to_dm_atomic_state(new_obj_state);
3286         }
3287
3288         return NULL;
3289 }
3290
3291 static struct drm_private_state *
3292 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3293 {
3294         struct dm_atomic_state *old_state, *new_state;
3295
3296         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3297         if (!new_state)
3298                 return NULL;
3299
3300         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3301
3302         old_state = to_dm_atomic_state(obj->state);
3303
3304         if (old_state && old_state->context)
3305                 new_state->context = dc_copy_state(old_state->context);
3306
3307         if (!new_state->context) {
3308                 kfree(new_state);
3309                 return NULL;
3310         }
3311
3312         return &new_state->base;
3313 }
3314
3315 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3316                                     struct drm_private_state *state)
3317 {
3318         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3319
3320         if (dm_state && dm_state->context)
3321                 dc_release_state(dm_state->context);
3322
3323         kfree(dm_state);
3324 }
3325
3326 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3327         .atomic_duplicate_state = dm_atomic_duplicate_state,
3328         .atomic_destroy_state = dm_atomic_destroy_state,
3329 };
3330
3331 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3332 {
3333         struct dm_atomic_state *state;
3334         int r;
3335
3336         adev->mode_info.mode_config_initialized = true;
3337
3338         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3339         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3340
3341         adev_to_drm(adev)->mode_config.max_width = 16384;
3342         adev_to_drm(adev)->mode_config.max_height = 16384;
3343
3344         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3345         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3346         /* indicates support for immediate flip */
3347         adev_to_drm(adev)->mode_config.async_page_flip = true;
3348
3349         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3350
3351         state = kzalloc(sizeof(*state), GFP_KERNEL);
3352         if (!state)
3353                 return -ENOMEM;
3354
3355         state->context = dc_create_state(adev->dm.dc);
3356         if (!state->context) {
3357                 kfree(state);
3358                 return -ENOMEM;
3359         }
3360
3361         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3362
3363         drm_atomic_private_obj_init(adev_to_drm(adev),
3364                                     &adev->dm.atomic_obj,
3365                                     &state->base,
3366                                     &dm_atomic_state_funcs);
3367
3368         r = amdgpu_display_modeset_create_props(adev);
3369         if (r) {
3370                 dc_release_state(state->context);
3371                 kfree(state);
3372                 return r;
3373         }
3374
3375         r = amdgpu_dm_audio_init(adev);
3376         if (r) {
3377                 dc_release_state(state->context);
3378                 kfree(state);
3379                 return r;
3380         }
3381
3382         return 0;
3383 }
3384
3385 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3386 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3387 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3388
3389 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3390         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3391
3392 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3393 {
3394 #if defined(CONFIG_ACPI)
3395         struct amdgpu_dm_backlight_caps caps;
3396
3397         memset(&caps, 0, sizeof(caps));
3398
3399         if (dm->backlight_caps.caps_valid)
3400                 return;
3401
3402         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3403         if (caps.caps_valid) {
3404                 dm->backlight_caps.caps_valid = true;
3405                 if (caps.aux_support)
3406                         return;
3407                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3408                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3409         } else {
3410                 dm->backlight_caps.min_input_signal =
3411                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3412                 dm->backlight_caps.max_input_signal =
3413                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3414         }
3415 #else
3416         if (dm->backlight_caps.aux_support)
3417                 return;
3418
3419         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3420         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3421 #endif
3422 }
3423
3424 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3425                                 unsigned int *min, unsigned int *max)
3426 {
3427         if (!caps)
3428                 return 0;
3429
3430         if (caps->aux_support) {
3431                 // Firmware limits are in nits, DC API wants millinits.
3432                 *max = 1000 * caps->aux_max_input_signal;
3433                 *min = 1000 * caps->aux_min_input_signal;
3434         } else {
3435                 // Firmware limits are 8-bit, PWM control is 16-bit.
3436                 *max = 0x101 * caps->max_input_signal;
3437                 *min = 0x101 * caps->min_input_signal;
3438         }
3439         return 1;
3440 }
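
/*
 * Worked example (illustrative only): with the default 8-bit firmware
 * limits of 12 and 255, the PWM branch above yields
 *
 *	min = 0x101 * 12  = 0x0c0c
 *	max = 0x101 * 255 = 0xffff
 *
 * i.e. multiplying by 0x101 replicates the 8-bit value into both bytes,
 * stretching the firmware range onto the full 16-bit PWM scale.
 */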
3441
3442 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3443                                         uint32_t brightness)
3444 {
3445         unsigned int min, max;
3446
3447         if (!get_brightness_range(caps, &min, &max))
3448                 return brightness;
3449
3450         // Rescale 0..255 to min..max
3451         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3452                                        AMDGPU_MAX_BL_LEVEL);
3453 }
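
/*
 * Example of the rescale (illustrative only): with min = 0x0c0c and
 * max = 0xffff from above, user brightness 0 maps to 0x0c0c, 255 maps
 * to 0xffff, and a mid-range request of 128 maps to
 * 0x0c0c + DIV_ROUND_CLOSEST(0xf3f3 * 128, 255) = 0x8680.
 */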
3454
3455 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3456                                       uint32_t brightness)
3457 {
3458         unsigned int min, max;
3459
3460         if (!get_brightness_range(caps, &min, &max))
3461                 return brightness;
3462
3463         if (brightness < min)
3464                 return 0;
3465         // Rescale min..max to 0..255
3466         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3467                                  max - min);
3468 }
3469
3470 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3471                                          u32 user_brightness)
3472 {
3473         struct amdgpu_dm_backlight_caps caps;
3474         struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3475         u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
3476         bool rc = true; /* stays true (success) if there are no eDP links */
3477         int i;
3478
3479         amdgpu_dm_update_backlight_caps(dm);
3480         caps = dm->backlight_caps;
3481
3482         for (i = 0; i < dm->num_of_edps; i++) {
3483                 dm->brightness[i] = user_brightness;
3484                 brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
3485                 link[i] = (struct dc_link *)dm->backlight_link[i];
3486         }
3487
3488         /* Change brightness based on AUX property */
3489         if (caps.aux_support) {
3490                 for (i = 0; i < dm->num_of_edps; i++) {
3491                         rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
3492                                 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3493                         if (!rc) {
3494                                 DRM_ERROR("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3495                                 break;
3496                         }
3497                 }
3498         } else {
3499                 for (i = 0; i < dm->num_of_edps; i++) {
3500                         rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
3501                         if (!rc) {
3502                                 DRM_ERROR("DM: Failed to update backlight on eDP[%d]\n", i);
3503                                 break;
3504                         }
3505                 }
3506         }
3507
3508         return rc ? 0 : 1;
3509 }
3510
3511 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3512 {
3513         struct amdgpu_display_manager *dm = bl_get_data(bd);
3514
3515         amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
3516
3517         return 0;
3518 }
3519
3520 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
3521 {
3522         struct amdgpu_dm_backlight_caps caps;
3523
3524         amdgpu_dm_update_backlight_caps(dm);
3525         caps = dm->backlight_caps;
3526
3527         if (caps.aux_support) {
3528                 struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3529                 u32 avg, peak;
3530                 bool rc;
3531
3532                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3533                 if (!rc)
3534                         return dm->brightness[0];
3535                 return convert_brightness_to_user(&caps, avg);
3536         } else {
3537                 int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3538
3539                 if (ret == DC_ERROR_UNEXPECTED)
3540                         return dm->brightness[0];
3541                 return convert_brightness_to_user(&caps, ret);
3542         }
3543 }
3544
3545 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3546 {
3547         struct amdgpu_display_manager *dm = bl_get_data(bd);
3548
3549         return amdgpu_dm_backlight_get_level(dm);
3550 }
3551
3552 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3553         .options = BL_CORE_SUSPENDRESUME,
3554         .get_brightness = amdgpu_dm_backlight_get_brightness,
3555         .update_status  = amdgpu_dm_backlight_update_status,
3556 };
3557
3558 static void
3559 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3560 {
3561         char bl_name[16];
3562         struct backlight_properties props = { 0 };
3563         int i;
3564
3565         amdgpu_dm_update_backlight_caps(dm);
3566         for (i = 0; i < dm->num_of_edps; i++)
3567                 dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
3568
3569         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3570         props.brightness = AMDGPU_MAX_BL_LEVEL;
3571         props.type = BACKLIGHT_RAW;
3572
3573         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3574                  adev_to_drm(dm->adev)->primary->index);
3575
3576         dm->backlight_dev = backlight_device_register(bl_name,
3577                                                       adev_to_drm(dm->adev)->dev,
3578                                                       dm,
3579                                                       &amdgpu_dm_backlight_ops,
3580                                                       &props);
3581
3582         if (IS_ERR(dm->backlight_dev))
3583                 DRM_ERROR("DM: Backlight registration failed!\n");
3584         else
3585                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3586 }
3587
3588 #endif
3589
3590 static int initialize_plane(struct amdgpu_display_manager *dm,
3591                             struct amdgpu_mode_info *mode_info, int plane_id,
3592                             enum drm_plane_type plane_type,
3593                             const struct dc_plane_cap *plane_cap)
3594 {
3595         struct drm_plane *plane;
3596         unsigned long possible_crtcs;
3597         int ret = 0;
3598
3599         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3600         if (!plane) {
3601                 DRM_ERROR("KMS: Failed to allocate plane\n");
3602                 return -ENOMEM;
3603         }
3604         plane->type = plane_type;
3605
3606         /*
3607          * HACK: IGT tests expect that the primary plane for a CRTC
3608          * can only have one possible CRTC. Only expose support for
3609          * any CRTC if they're not going to be used as a primary plane
3610          * for a CRTC - like overlay or underlay planes.
3611          */
3612         possible_crtcs = 1 << plane_id;
3613         if (plane_id >= dm->dc->caps.max_streams)
3614                 possible_crtcs = 0xff;
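        /*
         * E.g. with max_streams == 4: primary planes 0..3 advertise
         * possible_crtcs 0x1, 0x2, 0x4 and 0x8 respectively, while any
         * overlay plane (plane_id >= 4) advertises 0xff, i.e. all CRTCs.
         */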
3615
3616         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3617
3618         if (ret) {
3619                 DRM_ERROR("KMS: Failed to initialize plane\n");
3620                 kfree(plane);
3621                 return ret;
3622         }
3623
3624         if (mode_info)
3625                 mode_info->planes[plane_id] = plane;
3626
3627         return ret;
3628 }
3629
3630
3631 static void register_backlight_device(struct amdgpu_display_manager *dm,
3632                                       struct dc_link *link)
3633 {
3634 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3635         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3636
3637         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3638             link->type != dc_connection_none) {
3639                 /*
3640                  * Even if registration failed, we should continue with
3641                  * DM initialization, because not having backlight control
3642                  * is better than a black screen.
3643                  */
3644                 if (!dm->backlight_dev)
3645                         amdgpu_dm_register_backlight_device(dm);
3646
3647                 if (dm->backlight_dev) {
3648                         dm->backlight_link[dm->num_of_edps] = link;
3649                         dm->num_of_edps++;
3650                 }
3651         }
3652 #endif
3653 }
3654
3655
3656 /*
3657  * In this architecture, the association
3658  * connector -> encoder -> crtc
3659  * is not really required. The crtc and connector will hold the
3660  * display_index as an abstraction to use with the DAL component.
3661  *
3662  * Returns 0 on success
3663  */
3664 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3665 {
3666         struct amdgpu_display_manager *dm = &adev->dm;
3667         int32_t i;
3668         struct amdgpu_dm_connector *aconnector = NULL;
3669         struct amdgpu_encoder *aencoder = NULL;
3670         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3671         uint32_t link_cnt;
3672         int32_t primary_planes;
3673         enum dc_connection_type new_connection_type = dc_connection_none;
3674         const struct dc_plane_cap *plane;
3675
3676         dm->display_indexes_num = dm->dc->caps.max_streams;
3677         /* Update the actual used number of crtc */
3678         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3679
3680         link_cnt = dm->dc->caps.max_links;
3681         if (amdgpu_dm_mode_config_init(dm->adev)) {
3682                 DRM_ERROR("DM: Failed to initialize mode config\n");
3683                 return -EINVAL;
3684         }
3685
3686         /* There is one primary plane per CRTC */
3687         primary_planes = dm->dc->caps.max_streams;
3688         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3689
3690         /*
3691          * Initialize primary planes, implicit planes for legacy IOCTLS.
3692          * Order is reversed to match iteration order in atomic check.
3693          */
3694         for (i = (primary_planes - 1); i >= 0; i--) {
3695                 plane = &dm->dc->caps.planes[i];
3696
3697                 if (initialize_plane(dm, mode_info, i,
3698                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3699                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3700                         goto fail;
3701                 }
3702         }
3703
3704         /*
3705          * Initialize overlay planes, index starting after primary planes.
3706          * These planes have a higher DRM index than the primary planes since
3707          * they should be considered as having a higher z-order.
3708          * Order is reversed to match iteration order in atomic check.
3709          *
3710          * Only support DCN for now, and only expose one so we don't encourage
3711          * userspace to use up all the pipes.
3712          */
3713         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3714                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3715
3716                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3717                         continue;
3718
3719                 if (!plane->blends_with_above || !plane->blends_with_below)
3720                         continue;
3721
3722                 if (!plane->pixel_format_support.argb8888)
3723                         continue;
3724
3725                 if (initialize_plane(dm, NULL, primary_planes + i,
3726                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3727                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3728                         goto fail;
3729                 }
3730
3731                 /* Only create one overlay plane. */
3732                 break;
3733         }
3734
3735         for (i = 0; i < dm->dc->caps.max_streams; i++)
3736                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3737                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3738                         goto fail;
3739                 }
3740
3741 #if defined(CONFIG_DRM_AMD_DC_DCN)
3742         /* Use Outbox interrupt */
3743         switch (adev->asic_type) {
3744         case CHIP_SIENNA_CICHLID:
3745         case CHIP_NAVY_FLOUNDER:
3746         case CHIP_RENOIR:
3747                 if (register_outbox_irq_handlers(dm->adev)) {
3748                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3749                         goto fail;
3750                 }
3751                 break;
3752         default:
3753                 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3754         }
3755 #endif
3756
3757         /* loops over all connectors on the board */
3758         for (i = 0; i < link_cnt; i++) {
3759                 struct dc_link *link = NULL;
3760
3761                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3762                         DRM_ERROR(
3763                                 "KMS: Cannot support more than %d display indexes\n",
3764                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3765                         continue;
3766                 }
3767
3768                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3769                 if (!aconnector)
3770                         goto fail;
3771
3772                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3773                 if (!aencoder)
3774                         goto fail;
3775
3776                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3777                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3778                         goto fail;
3779                 }
3780
3781                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3782                         DRM_ERROR("KMS: Failed to initialize connector\n");
3783                         goto fail;
3784                 }
3785
3786                 link = dc_get_link_at_index(dm->dc, i);
3787
3788                 if (!dc_link_detect_sink(link, &new_connection_type))
3789                         DRM_ERROR("KMS: Failed to detect connector\n");
3790
3791                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3792                         emulated_link_detect(link);
3793                         amdgpu_dm_update_connector_after_detect(aconnector);
3794
3795                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3796                         amdgpu_dm_update_connector_after_detect(aconnector);
3797                         register_backlight_device(dm, link);
3798                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3799                                 amdgpu_dm_set_psr_caps(link);
3800                 }
3801
3802
3803         }
3804
3805         /* Software is initialized. Now we can register interrupt handlers. */
3806         switch (adev->asic_type) {
3807 #if defined(CONFIG_DRM_AMD_DC_SI)
3808         case CHIP_TAHITI:
3809         case CHIP_PITCAIRN:
3810         case CHIP_VERDE:
3811         case CHIP_OLAND:
3812                 if (dce60_register_irq_handlers(dm->adev)) {
3813                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3814                         goto fail;
3815                 }
3816                 break;
3817 #endif
3818         case CHIP_BONAIRE:
3819         case CHIP_HAWAII:
3820         case CHIP_KAVERI:
3821         case CHIP_KABINI:
3822         case CHIP_MULLINS:
3823         case CHIP_TONGA:
3824         case CHIP_FIJI:
3825         case CHIP_CARRIZO:
3826         case CHIP_STONEY:
3827         case CHIP_POLARIS11:
3828         case CHIP_POLARIS10:
3829         case CHIP_POLARIS12:
3830         case CHIP_VEGAM:
3831         case CHIP_VEGA10:
3832         case CHIP_VEGA12:
3833         case CHIP_VEGA20:
3834                 if (dce110_register_irq_handlers(dm->adev)) {
3835                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3836                         goto fail;
3837                 }
3838                 break;
3839 #if defined(CONFIG_DRM_AMD_DC_DCN)
3840         case CHIP_RAVEN:
3841         case CHIP_NAVI12:
3842         case CHIP_NAVI10:
3843         case CHIP_NAVI14:
3844         case CHIP_RENOIR:
3845         case CHIP_SIENNA_CICHLID:
3846         case CHIP_NAVY_FLOUNDER:
3847         case CHIP_DIMGREY_CAVEFISH:
3848         case CHIP_BEIGE_GOBY:
3849         case CHIP_VANGOGH:
3850                 if (dcn10_register_irq_handlers(dm->adev)) {
3851                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3852                         goto fail;
3853                 }
3854                 break;
3855 #endif
3856         default:
3857                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3858                 goto fail;
3859         }
3860
3861         return 0;
3862 fail:
3863         kfree(aencoder);
3864         kfree(aconnector);
3865
3866         return -EINVAL;
3867 }
3868
3869 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3870 {
3871         drm_mode_config_cleanup(dm->ddev);
3872         drm_atomic_private_obj_fini(&dm->atomic_obj);
3874 }
3875
3876 /******************************************************************************
3877  * amdgpu_display_funcs functions
3878  *****************************************************************************/
3879
3880 /*
3881  * dm_bandwidth_update - program display watermarks
3882  *
3883  * @adev: amdgpu_device pointer
3884  *
3885  * Calculate and program the display watermarks and line buffer allocation.
3886  */
3887 static void dm_bandwidth_update(struct amdgpu_device *adev)
3888 {
3889         /* TODO: implement later */
3890 }
3891
3892 static const struct amdgpu_display_funcs dm_display_funcs = {
3893         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3894         .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
3895         .backlight_set_level = NULL, /* never called for DC */
3896         .backlight_get_level = NULL, /* never called for DC */
3897         .hpd_sense = NULL, /* called unconditionally */
3898         .hpd_set_polarity = NULL, /* called unconditionally */
3899         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3900         .page_flip_get_scanoutpos =
3901                 dm_crtc_get_scanoutpos, /* called unconditionally */
3902         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3903         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3904 };
3905
3906 #if defined(CONFIG_DEBUG_KERNEL_DC)
3907
3908 static ssize_t s3_debug_store(struct device *device,
3909                               struct device_attribute *attr,
3910                               const char *buf,
3911                               size_t count)
3912 {
3913         int ret;
3914         int s3_state;
3915         struct drm_device *drm_dev = dev_get_drvdata(device);
3916         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3917
3918         ret = kstrtoint(buf, 0, &s3_state);
3919
3920         if (ret == 0) {
3921                 if (s3_state) {
3922                         dm_resume(adev);
3923                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3924                 } else {
3925                         dm_suspend(adev);
                }
3926         }
3927
3928         return ret == 0 ? count : 0;
3929 }
3930
3931 DEVICE_ATTR_WO(s3_debug);
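
/*
 * Illustrative usage (the sysfs path is an assumption and depends on the
 * card's probe order):
 *
 *	echo 0 > /sys/class/drm/card0/device/s3_debug	(fake suspend)
 *	echo 1 > /sys/class/drm/card0/device/s3_debug	(fake resume + hotplug)
 *
 * Only available when the kernel is built with CONFIG_DEBUG_KERNEL_DC.
 */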
3932
3933 #endif
3934
3935 static int dm_early_init(void *handle)
3936 {
3937         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3938
3939         switch (adev->asic_type) {
3940 #if defined(CONFIG_DRM_AMD_DC_SI)
3941         case CHIP_TAHITI:
3942         case CHIP_PITCAIRN:
3943         case CHIP_VERDE:
3944                 adev->mode_info.num_crtc = 6;
3945                 adev->mode_info.num_hpd = 6;
3946                 adev->mode_info.num_dig = 6;
3947                 break;
3948         case CHIP_OLAND:
3949                 adev->mode_info.num_crtc = 2;
3950                 adev->mode_info.num_hpd = 2;
3951                 adev->mode_info.num_dig = 2;
3952                 break;
3953 #endif
3954         case CHIP_BONAIRE:
3955         case CHIP_HAWAII:
3956                 adev->mode_info.num_crtc = 6;
3957                 adev->mode_info.num_hpd = 6;
3958                 adev->mode_info.num_dig = 6;
3959                 break;
3960         case CHIP_KAVERI:
3961                 adev->mode_info.num_crtc = 4;
3962                 adev->mode_info.num_hpd = 6;
3963                 adev->mode_info.num_dig = 7;
3964                 break;
3965         case CHIP_KABINI:
3966         case CHIP_MULLINS:
3967                 adev->mode_info.num_crtc = 2;
3968                 adev->mode_info.num_hpd = 6;
3969                 adev->mode_info.num_dig = 6;
3970                 break;
3971         case CHIP_FIJI:
3972         case CHIP_TONGA:
3973                 adev->mode_info.num_crtc = 6;
3974                 adev->mode_info.num_hpd = 6;
3975                 adev->mode_info.num_dig = 7;
3976                 break;
3977         case CHIP_CARRIZO:
3978                 adev->mode_info.num_crtc = 3;
3979                 adev->mode_info.num_hpd = 6;
3980                 adev->mode_info.num_dig = 9;
3981                 break;
3982         case CHIP_STONEY:
3983                 adev->mode_info.num_crtc = 2;
3984                 adev->mode_info.num_hpd = 6;
3985                 adev->mode_info.num_dig = 9;
3986                 break;
3987         case CHIP_POLARIS11:
3988         case CHIP_POLARIS12:
3989                 adev->mode_info.num_crtc = 5;
3990                 adev->mode_info.num_hpd = 5;
3991                 adev->mode_info.num_dig = 5;
3992                 break;
3993         case CHIP_POLARIS10:
3994         case CHIP_VEGAM:
3995                 adev->mode_info.num_crtc = 6;
3996                 adev->mode_info.num_hpd = 6;
3997                 adev->mode_info.num_dig = 6;
3998                 break;
3999         case CHIP_VEGA10:
4000         case CHIP_VEGA12:
4001         case CHIP_VEGA20:
4002                 adev->mode_info.num_crtc = 6;
4003                 adev->mode_info.num_hpd = 6;
4004                 adev->mode_info.num_dig = 6;
4005                 break;
4006 #if defined(CONFIG_DRM_AMD_DC_DCN)
4007         case CHIP_RAVEN:
4008         case CHIP_RENOIR:
4009         case CHIP_VANGOGH:
4010                 adev->mode_info.num_crtc = 4;
4011                 adev->mode_info.num_hpd = 4;
4012                 adev->mode_info.num_dig = 4;
4013                 break;
4014         case CHIP_NAVI10:
4015         case CHIP_NAVI12:
4016         case CHIP_SIENNA_CICHLID:
4017         case CHIP_NAVY_FLOUNDER:
4018                 adev->mode_info.num_crtc = 6;
4019                 adev->mode_info.num_hpd = 6;
4020                 adev->mode_info.num_dig = 6;
4021                 break;
4022         case CHIP_NAVI14:
4023         case CHIP_DIMGREY_CAVEFISH:
4024                 adev->mode_info.num_crtc = 5;
4025                 adev->mode_info.num_hpd = 5;
4026                 adev->mode_info.num_dig = 5;
4027                 break;
4028         case CHIP_BEIGE_GOBY:
4029                 adev->mode_info.num_crtc = 2;
4030                 adev->mode_info.num_hpd = 2;
4031                 adev->mode_info.num_dig = 2;
4032                 break;
4033 #endif
4034         default:
4035                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4036                 return -EINVAL;
4037         }
4038
4039         amdgpu_dm_set_irq_funcs(adev);
4040
4041         if (adev->mode_info.funcs == NULL)
4042                 adev->mode_info.funcs = &dm_display_funcs;
4043
4044         /*
4045          * Note: Do NOT change adev->audio_endpt_rreg and
4046          * adev->audio_endpt_wreg because they are initialised in
4047          * amdgpu_device_init()
4048          */
4049 #if defined(CONFIG_DEBUG_KERNEL_DC)
4050         device_create_file(
4051                 adev_to_drm(adev)->dev,
4052                 &dev_attr_s3_debug);
4053 #endif
4054
4055         return 0;
4056 }
4057
4058 static bool modeset_required(struct drm_crtc_state *crtc_state,
4059                              struct dc_stream_state *new_stream,
4060                              struct dc_stream_state *old_stream)
4061 {
4062         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4063 }
4064
4065 static bool modereset_required(struct drm_crtc_state *crtc_state)
4066 {
4067         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4068 }
4069
4070 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4071 {
4072         drm_encoder_cleanup(encoder);
4073         kfree(encoder);
4074 }
4075
4076 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4077         .destroy = amdgpu_dm_encoder_destroy,
4078 };
4079
4080
4081 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4082                                          struct drm_framebuffer *fb,
4083                                          int *min_downscale, int *max_upscale)
4084 {
4085         struct amdgpu_device *adev = drm_to_adev(dev);
4086         struct dc *dc = adev->dm.dc;
4087         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4088         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4089
4090         switch (fb->format->format) {
4091         case DRM_FORMAT_P010:
4092         case DRM_FORMAT_NV12:
4093         case DRM_FORMAT_NV21:
4094                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4095                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4096                 break;
4097
4098         case DRM_FORMAT_XRGB16161616F:
4099         case DRM_FORMAT_ARGB16161616F:
4100         case DRM_FORMAT_XBGR16161616F:
4101         case DRM_FORMAT_ABGR16161616F:
4102                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4103                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4104                 break;
4105
4106         default:
4107                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4108                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4109                 break;
4110         }
4111
4112         /*
4113          * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4114          * scaling factor of 1.0 == 1000 units.
4115          */
4116         if (*max_upscale == 1)
4117                 *max_upscale = 1000;
4118
4119         if (*min_downscale == 1)
4120                 *min_downscale = 1000;
4121 }
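
/*
 * The factors returned above are in units of 1/1000th: 1000 means 1:1,
 * so e.g. a max_upscale of 16000 allows up to 16x enlargement and a
 * min_downscale of 250 allows shrinking down to 1/4 of the source size.
 */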
4122
4123
4124 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4125                                 struct dc_scaling_info *scaling_info)
4126 {
4127         int scale_w, scale_h, min_downscale, max_upscale;
4128
4129         memset(scaling_info, 0, sizeof(*scaling_info));
4130
4131         /* Source is fixed 16.16 but we ignore mantissa for now... */
4132         scaling_info->src_rect.x = state->src_x >> 16;
4133         scaling_info->src_rect.y = state->src_y >> 16;
4134
4135         /*
4136          * For reasons we don't (yet) fully understand, a non-zero
4137          * src_y coordinate into an NV12 buffer can cause a
4138          * system hang. To avoid hangs (and maybe be overly cautious)
4139          * let's reject both non-zero src_x and src_y.
4140          *
4141          * We currently know of only one use-case to reproduce a
4142          * scenario with non-zero src_x and src_y for NV12, which
4143          * is to gesture the YouTube Android app into full screen
4144          * on ChromeOS.
4145          */
4146         if (state->fb &&
4147             state->fb->format->format == DRM_FORMAT_NV12 &&
4148             (scaling_info->src_rect.x != 0 ||
4149              scaling_info->src_rect.y != 0))
4150                 return -EINVAL;
4151
4152         scaling_info->src_rect.width = state->src_w >> 16;
4153         if (scaling_info->src_rect.width == 0)
4154                 return -EINVAL;
4155
4156         scaling_info->src_rect.height = state->src_h >> 16;
4157         if (scaling_info->src_rect.height == 0)
4158                 return -EINVAL;
4159
4160         scaling_info->dst_rect.x = state->crtc_x;
4161         scaling_info->dst_rect.y = state->crtc_y;
4162
4163         if (state->crtc_w == 0)
4164                 return -EINVAL;
4165
4166         scaling_info->dst_rect.width = state->crtc_w;
4167
4168         if (state->crtc_h == 0)
4169                 return -EINVAL;
4170
4171         scaling_info->dst_rect.height = state->crtc_h;
4172
4173         /* DRM doesn't specify clipping on destination output. */
4174         scaling_info->clip_rect = scaling_info->dst_rect;
4175
4176         /* Validate scaling per-format with DC plane caps */
4177         if (state->plane && state->plane->dev && state->fb) {
4178                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4179                                              &min_downscale, &max_upscale);
4180         } else {
4181                 min_downscale = 250;
4182                 max_upscale = 16000;
4183         }
4184
4185         scale_w = scaling_info->dst_rect.width * 1000 /
4186                   scaling_info->src_rect.width;
4187
4188         if (scale_w < min_downscale || scale_w > max_upscale)
4189                 return -EINVAL;
4190
4191         scale_h = scaling_info->dst_rect.height * 1000 /
4192                   scaling_info->src_rect.height;
4193
4194         if (scale_h < min_downscale || scale_h > max_upscale)
4195                 return -EINVAL;
4196
4197         /*
4198          * The "scaling_quality" can be ignored for now, quality = 0 has DC
4199          * assume reasonable defaults based on the format.
4200          */
4201
4202         return 0;
4203 }
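
/*
 * Worked example (illustrative only): scanning a 1920x1080 source rect
 * out to a 3840x2160 destination gives
 *
 *	scale_w = 3840 * 1000 / 1920 = 2000	(2.0x upscale)
 *	scale_h = 2160 * 1000 / 1080 = 2000
 *
 * which passes validation as long as the format's max_upscale factor is
 * at least 2000.
 */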
4204
4205 static void
4206 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4207                                  uint64_t tiling_flags)
4208 {
4209         /* Fill GFX8 params */
4210         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4211                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4212
4213                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4214                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4215                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4216                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4217                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4218
4219                 /* XXX fix me for VI */
4220                 tiling_info->gfx8.num_banks = num_banks;
4221                 tiling_info->gfx8.array_mode =
4222                                 DC_ARRAY_2D_TILED_THIN1;
4223                 tiling_info->gfx8.tile_split = tile_split;
4224                 tiling_info->gfx8.bank_width = bankw;
4225                 tiling_info->gfx8.bank_height = bankh;
4226                 tiling_info->gfx8.tile_aspect = mtaspect;
4227                 tiling_info->gfx8.tile_mode =
4228                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4229         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4230                         == DC_ARRAY_1D_TILED_THIN1) {
4231                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4232         }
4233
4234         tiling_info->gfx8.pipe_config =
4235                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4236 }
4237
4238 static void
4239 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4240                                   union dc_tiling_info *tiling_info)
4241 {
4242         tiling_info->gfx9.num_pipes =
4243                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4244         tiling_info->gfx9.num_banks =
4245                 adev->gfx.config.gb_addr_config_fields.num_banks;
4246         tiling_info->gfx9.pipe_interleave =
4247                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4248         tiling_info->gfx9.num_shader_engines =
4249                 adev->gfx.config.gb_addr_config_fields.num_se;
4250         tiling_info->gfx9.max_compressed_frags =
4251                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4252         tiling_info->gfx9.num_rb_per_se =
4253                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4254         tiling_info->gfx9.shaderEnable = 1;
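        /*
         * num_pkrs (packers) only exists on gfx10.3-class (RDNA2) parts,
         * hence the explicit ASIC list below.
         */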
4255         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4256             adev->asic_type == CHIP_NAVY_FLOUNDER ||
4257             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4258             adev->asic_type == CHIP_BEIGE_GOBY ||
4259             adev->asic_type == CHIP_VANGOGH)
4260                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4261 }
4262
4263 static int
4264 validate_dcc(struct amdgpu_device *adev,
4265              const enum surface_pixel_format format,
4266              const enum dc_rotation_angle rotation,
4267              const union dc_tiling_info *tiling_info,
4268              const struct dc_plane_dcc_param *dcc,
4269              const struct dc_plane_address *address,
4270              const struct plane_size *plane_size)
4271 {
4272         struct dc *dc = adev->dm.dc;
4273         struct dc_dcc_surface_param input;
4274         struct dc_surface_dcc_cap output;
4275
4276         memset(&input, 0, sizeof(input));
4277         memset(&output, 0, sizeof(output));
4278
4279         if (!dcc->enable)
4280                 return 0;
4281
4282         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4283             !dc->cap_funcs.get_dcc_compression_cap)
4284                 return -EINVAL;
4285
4286         input.format = format;
4287         input.surface_size.width = plane_size->surface_size.width;
4288         input.surface_size.height = plane_size->surface_size.height;
4289         input.swizzle_mode = tiling_info->gfx9.swizzle;
4290
4291         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4292                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4293         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4294                 input.scan = SCAN_DIRECTION_VERTICAL;
4295
4296         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4297                 return -EINVAL;
4298
4299         if (!output.capable)
4300                 return -EINVAL;
4301
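        /*
         * If the hardware requires independent 64B blocks for this surface
         * but the chosen modifier doesn't provide them, the DCC layout is
         * incompatible.
         */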
4302         if (dcc->independent_64b_blks == 0 &&
4303             output.grph.rgb.independent_64b_blks != 0)
4304                 return -EINVAL;
4305
4306         return 0;
4307 }
4308
4309 static bool
4310 modifier_has_dcc(uint64_t modifier)
4311 {
4312         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4313 }
4314
4315 static unsigned
4316 modifier_gfx9_swizzle_mode(uint64_t modifier)
4317 {
4318         if (modifier == DRM_FORMAT_MOD_LINEAR)
4319                 return 0;
4320
4321         return AMD_FMT_MOD_GET(TILE, modifier);
4322 }
4323
4324 static const struct drm_format_info *
4325 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4326 {
4327         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4328 }
4329
4330 static void
4331 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4332                                     union dc_tiling_info *tiling_info,
4333                                     uint64_t modifier)
4334 {
4335         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4336         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4337         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4338         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4339
4340         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4341
4342         if (!IS_AMD_FMT_MOD(modifier))
4343                 return;
4344
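        /*
         * Decode the pipe/SE split from the modifier: at most 4 pipe bits
         * are taken directly (see the pipes_log2 clamp above); any remaining
         * XOR bits are attributed to shader engines.
         */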
4345         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4346         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4347
4348         if (adev->family >= AMDGPU_FAMILY_NV) {
4349                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4350         } else {
4351                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4352
4353                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4354         }
4355 }
4356
4357 enum dm_micro_swizzle {
4358         MICRO_SWIZZLE_Z = 0,
4359         MICRO_SWIZZLE_S = 1,
4360         MICRO_SWIZZLE_D = 2,
4361         MICRO_SWIZZLE_R = 3
4362 };
4363
4364 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4365                                           uint32_t format,
4366                                           uint64_t modifier)
4367 {
4368         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4369         const struct drm_format_info *info = drm_format_info(format);
4370         int i;
4371
4372         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
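        /*
         * The low two bits of the gfx9+ swizzle mode select the micro-tile
         * pattern, matching the dm_micro_swizzle enum above.
         */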
4373
4374         if (!info)
4375                 return false;
4376
4377         /*
4378          * We always have to allow these modifiers:
4379          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4380          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4381          */
4382         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4383             modifier == DRM_FORMAT_MOD_INVALID) {
4384                 return true;
4385         }
4386
4387         /* Check that the modifier is on the list of the plane's supported modifiers. */
4388         for (i = 0; i < plane->modifier_count; i++) {
4389                 if (modifier == plane->modifiers[i])
4390                         break;
4391         }
4392         if (i == plane->modifier_count)
4393                 return false;
4394
4395         /*
4396          * For D swizzle the canonical modifier depends on the bpp, so check
4397          * it here.
4398          */
4399         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4400             adev->family >= AMDGPU_FAMILY_NV) {
4401                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4402                         return false;
4403         }
4404
4405         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4406             info->cpp[0] < 8)
4407                 return false;
4408
4409         if (modifier_has_dcc(modifier)) {
4410                 /* Per radeonsi comments, 16/64 bpp are more complicated. */
4411                 if (info->cpp[0] != 4)
4412                         return false;
4413                 /* We support multi-planar formats, but not when combined with
4414                  * additional DCC metadata planes. */
4415                 if (info->num_planes > 1)
4416                         return false;
4417         }
4418
4419         return true;
4420 }
4421
4422 static void
4423 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4424 {
4425         if (!*mods)
4426                 return;
4427
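        /*
         * Grow the array geometrically when full. On allocation failure,
         * free and NULL *mods so this and any subsequent calls become
         * no-ops; the caller checks *mods for NULL at the end.
         */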
4428         if (*cap - *size < 1) {
4429                 uint64_t new_cap = *cap * 2;
4430                 uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
4431
4432                 if (!new_mods) {
4433                         kfree(*mods);
4434                         *mods = NULL;
4435                         return;
4436                 }
4437
4438                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4439                 kfree(*mods);
4440                 *mods = new_mods;
4441                 *cap = new_cap;
4442         }
4443
4444         (*mods)[*size] = mod;
4445         *size += 1;
4446 }
4447
4448 static void
4449 add_gfx9_modifiers(const struct amdgpu_device *adev,
4450                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4451 {
4452         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4453         int pipe_xor_bits = min(8, pipes +
4454                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4455         int bank_xor_bits = min(8 - pipe_xor_bits,
4456                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4457         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4458                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4459
4461         if (adev->family == AMDGPU_FAMILY_RV) {
4462                 /* Raven2 and newer support DCC constant encoding. */
4463                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4464
4465                 /*
4466                  * No _D DCC swizzles yet because we only allow 32bpp, which
4467                  * doesn't support _D on DCN
4468                  */
4469
4470                 if (has_constant_encode) {
4471                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4472                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4473                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4474                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4475                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4476                                     AMD_FMT_MOD_SET(DCC, 1) |
4477                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4478                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4479                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4480                 }
4481
4482                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4483                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4484                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4485                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4486                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4487                             AMD_FMT_MOD_SET(DCC, 1) |
4488                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4489                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4490                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4491
4492                 if (has_constant_encode) {
4493                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4494                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4495                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4496                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4497                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4498                                     AMD_FMT_MOD_SET(DCC, 1) |
4499                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4500                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4501                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4503                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4504                                     AMD_FMT_MOD_SET(RB, rb) |
4505                                     AMD_FMT_MOD_SET(PIPE, pipes));
4506                 }
4507
4508                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4509                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4510                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4511                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4512                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4513                             AMD_FMT_MOD_SET(DCC, 1) |
4514                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4515                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4516                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4517                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4518                             AMD_FMT_MOD_SET(RB, rb) |
4519                             AMD_FMT_MOD_SET(PIPE, pipes));
4520         }
4521
4522         /*
4523          * Only supported for 64bpp on Raven, will be filtered on format in
4524          * dm_plane_format_mod_supported.
4525          */
4526         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4527                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4528                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4529                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4530                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4531
4532         if (adev->family == AMDGPU_FAMILY_RV) {
4533                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4534                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4535                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4536                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4537                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4538         }
4539
4540         /*
4541          * Only supported for 64bpp on Raven, will be filtered on format in
4542          * dm_plane_format_mod_supported.
4543          */
4544         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4545                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4546                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4547
4548         if (adev->family == AMDGPU_FAMILY_RV) {
4549                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4550                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4551                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4552         }
4553 }
4554
4555 static void
4556 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4557                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4558 {
4559         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4560
4561         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4562                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4563                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4564                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4565                     AMD_FMT_MOD_SET(DCC, 1) |
4566                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4567                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4568                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4569
4570         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4571                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4572                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4573                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4574                     AMD_FMT_MOD_SET(DCC, 1) |
4575                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4576                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4577                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4578                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4579
4580         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4581                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4582                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4583                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4584
4585         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4586                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4587                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4588                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4589
4591         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4592         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4593                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4594                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4595
4596         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4597                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4598                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4599 }
4600
4601 static void
4602 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4603                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4604 {
4605         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4606         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4607
4608         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4609                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4610                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4611                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4612                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4613                     AMD_FMT_MOD_SET(DCC, 1) |
4614                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4615                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4616                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4617                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4618
4619         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4620                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4621                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4622                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4623                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4624                     AMD_FMT_MOD_SET(DCC, 1) |
4625                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4626                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4627                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4628                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4629                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4630
4631         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4632                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4633                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4634                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4635                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4636
4637         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4638                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4639                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4640                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4641                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4642
4643         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4644         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4645                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4646                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4647
4648         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4649                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4650                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4651 }
4652
4653 static int
4654 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4655 {
4656         uint64_t size = 0, capacity = 128;
4657         *mods = NULL;
4658
4659         /* We have not hooked up any pre-GFX9 modifiers. */
4660         if (adev->family < AMDGPU_FAMILY_AI)
4661                 return 0;
4662
4663         *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
4664
4665         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4666                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4667                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4668                 return *mods ? 0 : -ENOMEM;
4669         }
4670
4671         switch (adev->family) {
4672         case AMDGPU_FAMILY_AI:
4673         case AMDGPU_FAMILY_RV:
4674                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4675                 break;
4676         case AMDGPU_FAMILY_NV:
4677         case AMDGPU_FAMILY_VGH:
4678                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4679                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4680                 else
4681                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4682                 break;
4683         }
4684
4685         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4686
4687         /* INVALID marks the end of the list. */
4688         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4689
4690         if (!*mods)
4691                 return -ENOMEM;
4692
4693         return 0;
4694 }
4695
4696 static int
4697 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4698                                           const struct amdgpu_framebuffer *afb,
4699                                           const enum surface_pixel_format format,
4700                                           const enum dc_rotation_angle rotation,
4701                                           const struct plane_size *plane_size,
4702                                           union dc_tiling_info *tiling_info,
4703                                           struct dc_plane_dcc_param *dcc,
4704                                           struct dc_plane_address *address,
4705                                           const bool force_disable_dcc)
4706 {
4707         const uint64_t modifier = afb->base.modifier;
4708         int ret;
4709
4710         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4711         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4712
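        /*
         * With an AMD DCC modifier the metadata is carried as an extra
         * framebuffer plane; offsets[1]/pitches[1] describe the
         * (displayable) DCC plane.
         */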
4713         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4714                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4715
4716                 dcc->enable = 1;
4717                 dcc->meta_pitch = afb->base.pitches[1];
4718                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4719
4720                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4721                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4722         }
4723
4724         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4725         if (ret)
4726                 return ret;
4727
4728         return 0;
4729 }
4730
4731 static int
4732 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4733                              const struct amdgpu_framebuffer *afb,
4734                              const enum surface_pixel_format format,
4735                              const enum dc_rotation_angle rotation,
4736                              const uint64_t tiling_flags,
4737                              union dc_tiling_info *tiling_info,
4738                              struct plane_size *plane_size,
4739                              struct dc_plane_dcc_param *dcc,
4740                              struct dc_plane_address *address,
4741                              bool tmz_surface,
4742                              bool force_disable_dcc)
4743 {
4744         const struct drm_framebuffer *fb = &afb->base;
4745         int ret;
4746
4747         memset(tiling_info, 0, sizeof(*tiling_info));
4748         memset(plane_size, 0, sizeof(*plane_size));
4749         memset(dcc, 0, sizeof(*dcc));
4750         memset(address, 0, sizeof(*address));
4751
4752         address->tmz_surface = tmz_surface;
4753
4754         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4755                 uint64_t addr = afb->address + fb->offsets[0];
4756
4757                 plane_size->surface_size.x = 0;
4758                 plane_size->surface_size.y = 0;
4759                 plane_size->surface_size.width = fb->width;
4760                 plane_size->surface_size.height = fb->height;
4761                 plane_size->surface_pitch =
4762                         fb->pitches[0] / fb->format->cpp[0];
4763
4764                 address->type = PLN_ADDR_TYPE_GRAPHICS;
4765                 address->grph.addr.low_part = lower_32_bits(addr);
4766                 address->grph.addr.high_part = upper_32_bits(addr);
4767         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4768                 uint64_t luma_addr = afb->address + fb->offsets[0];
4769                 uint64_t chroma_addr = afb->address + fb->offsets[1];
4770
4771                 plane_size->surface_size.x = 0;
4772                 plane_size->surface_size.y = 0;
4773                 plane_size->surface_size.width = fb->width;
4774                 plane_size->surface_size.height = fb->height;
4775                 plane_size->surface_pitch =
4776                         fb->pitches[0] / fb->format->cpp[0];
4777
4778                 plane_size->chroma_size.x = 0;
4779                 plane_size->chroma_size.y = 0;
4780                 /* TODO: set these based on surface format */
4781                 plane_size->chroma_size.width = fb->width / 2;
4782                 plane_size->chroma_size.height = fb->height / 2;
4783
4784                 plane_size->chroma_pitch =
4785                         fb->pitches[1] / fb->format->cpp[1];
4786
4787                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4788                 address->video_progressive.luma_addr.low_part =
4789                         lower_32_bits(luma_addr);
4790                 address->video_progressive.luma_addr.high_part =
4791                         upper_32_bits(luma_addr);
4792                 address->video_progressive.chroma_addr.low_part =
4793                         lower_32_bits(chroma_addr);
4794                 address->video_progressive.chroma_addr.high_part =
4795                         upper_32_bits(chroma_addr);
4796         }
4797
4798         if (adev->family >= AMDGPU_FAMILY_AI) {
4799                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4800                                                                 rotation, plane_size,
4801                                                                 tiling_info, dcc,
4802                                                                 address,
4803                                                                 force_disable_dcc);
4804                 if (ret)
4805                         return ret;
4806         } else {
4807                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4808         }
4809
4810         return 0;
4811 }
4812
4813 static void
4814 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4815                                bool *per_pixel_alpha, bool *global_alpha,
4816                                int *global_alpha_value)
4817 {
4818         *per_pixel_alpha = false;
4819         *global_alpha = false;
4820         *global_alpha_value = 0xff;
4821
4822         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4823                 return;
4824
4825         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4826                 static const uint32_t alpha_formats[] = {
4827                         DRM_FORMAT_ARGB8888,
4828                         DRM_FORMAT_RGBA8888,
4829                         DRM_FORMAT_ABGR8888,
4830                 };
4831                 uint32_t format = plane_state->fb->format->format;
4832                 unsigned int i;
4833
4834                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4835                         if (format == alpha_formats[i]) {
4836                                 *per_pixel_alpha = true;
4837                                 break;
4838                         }
4839                 }
4840         }
4841
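        /*
         * DRM plane alpha is 16 bit (0xffff == fully opaque); DC takes an
         * 8 bit global alpha, so keep only the high byte.
         */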
4842         if (plane_state->alpha < 0xffff) {
4843                 *global_alpha = true;
4844                 *global_alpha_value = plane_state->alpha >> 8;
4845         }
4846 }
4847
4848 static int
4849 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4850                             const enum surface_pixel_format format,
4851                             enum dc_color_space *color_space)
4852 {
4853         bool full_range;
4854
4855         *color_space = COLOR_SPACE_SRGB;
4856
4857         /* DRM color properties only affect non-RGB formats. */
4858         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4859                 return 0;
4860
4861         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4862
4863         switch (plane_state->color_encoding) {
4864         case DRM_COLOR_YCBCR_BT601:
4865                 if (full_range)
4866                         *color_space = COLOR_SPACE_YCBCR601;
4867                 else
4868                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4869                 break;
4870
4871         case DRM_COLOR_YCBCR_BT709:
4872                 if (full_range)
4873                         *color_space = COLOR_SPACE_YCBCR709;
4874                 else
4875                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4876                 break;
4877
4878         case DRM_COLOR_YCBCR_BT2020:
4879                 if (full_range)
4880                         *color_space = COLOR_SPACE_2020_YCBCR;
4881                 else
4882                         return -EINVAL;
4883                 break;
4884
4885         default:
4886                 return -EINVAL;
4887         }
4888
4889         return 0;
4890 }
4891
4892 static int
4893 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4894                             const struct drm_plane_state *plane_state,
4895                             const uint64_t tiling_flags,
4896                             struct dc_plane_info *plane_info,
4897                             struct dc_plane_address *address,
4898                             bool tmz_surface,
4899                             bool force_disable_dcc)
4900 {
4901         const struct drm_framebuffer *fb = plane_state->fb;
4902         const struct amdgpu_framebuffer *afb =
4903                 to_amdgpu_framebuffer(plane_state->fb);
4904         int ret;
4905
4906         memset(plane_info, 0, sizeof(*plane_info));
4907
4908         switch (fb->format->format) {
4909         case DRM_FORMAT_C8:
4910                 plane_info->format =
4911                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4912                 break;
4913         case DRM_FORMAT_RGB565:
4914                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4915                 break;
4916         case DRM_FORMAT_XRGB8888:
4917         case DRM_FORMAT_ARGB8888:
4918                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4919                 break;
4920         case DRM_FORMAT_XRGB2101010:
4921         case DRM_FORMAT_ARGB2101010:
4922                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4923                 break;
4924         case DRM_FORMAT_XBGR2101010:
4925         case DRM_FORMAT_ABGR2101010:
4926                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4927                 break;
4928         case DRM_FORMAT_XBGR8888:
4929         case DRM_FORMAT_ABGR8888:
4930                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4931                 break;
4932         case DRM_FORMAT_NV21:
4933                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4934                 break;
4935         case DRM_FORMAT_NV12:
4936                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4937                 break;
4938         case DRM_FORMAT_P010:
4939                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4940                 break;
4941         case DRM_FORMAT_XRGB16161616F:
4942         case DRM_FORMAT_ARGB16161616F:
4943                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4944                 break;
4945         case DRM_FORMAT_XBGR16161616F:
4946         case DRM_FORMAT_ABGR16161616F:
4947                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4948                 break;
4949         default:
4950                 DRM_ERROR(
4951                         "Unsupported screen format %p4cc\n",
4952                         &fb->format->format);
4953                 return -EINVAL;
4954         }
4955
4956         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4957         case DRM_MODE_ROTATE_0:
4958                 plane_info->rotation = ROTATION_ANGLE_0;
4959                 break;
4960         case DRM_MODE_ROTATE_90:
4961                 plane_info->rotation = ROTATION_ANGLE_90;
4962                 break;
4963         case DRM_MODE_ROTATE_180:
4964                 plane_info->rotation = ROTATION_ANGLE_180;
4965                 break;
4966         case DRM_MODE_ROTATE_270:
4967                 plane_info->rotation = ROTATION_ANGLE_270;
4968                 break;
4969         default:
4970                 plane_info->rotation = ROTATION_ANGLE_0;
4971                 break;
4972         }
4973
4974         plane_info->visible = true;
4975         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4976
4977         plane_info->layer_index = 0;
4978
4979         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4980                                           &plane_info->color_space);
4981         if (ret)
4982                 return ret;
4983
4984         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4985                                            plane_info->rotation, tiling_flags,
4986                                            &plane_info->tiling_info,
4987                                            &plane_info->plane_size,
4988                                            &plane_info->dcc, address, tmz_surface,
4989                                            force_disable_dcc);
4990         if (ret)
4991                 return ret;
4992
4993         fill_blending_from_plane_state(
4994                 plane_state, &plane_info->per_pixel_alpha,
4995                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4996
4997         return 0;
4998 }
4999
5000 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5001                                     struct dc_plane_state *dc_plane_state,
5002                                     struct drm_plane_state *plane_state,
5003                                     struct drm_crtc_state *crtc_state)
5004 {
5005         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5006         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5007         struct dc_scaling_info scaling_info;
5008         struct dc_plane_info plane_info;
5009         int ret;
5010         bool force_disable_dcc = false;
5011
5012         ret = fill_dc_scaling_info(plane_state, &scaling_info);
5013         if (ret)
5014                 return ret;
5015
5016         dc_plane_state->src_rect = scaling_info.src_rect;
5017         dc_plane_state->dst_rect = scaling_info.dst_rect;
5018         dc_plane_state->clip_rect = scaling_info.clip_rect;
5019         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5020
5021         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5022         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5023                                           afb->tiling_flags,
5024                                           &plane_info,
5025                                           &dc_plane_state->address,
5026                                           afb->tmz_surface,
5027                                           force_disable_dcc);
5028         if (ret)
5029                 return ret;
5030
5031         dc_plane_state->format = plane_info.format;
5032         dc_plane_state->color_space = plane_info.color_space;
5034         dc_plane_state->plane_size = plane_info.plane_size;
5035         dc_plane_state->rotation = plane_info.rotation;
5036         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5037         dc_plane_state->stereo_format = plane_info.stereo_format;
5038         dc_plane_state->tiling_info = plane_info.tiling_info;
5039         dc_plane_state->visible = plane_info.visible;
5040         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5041         dc_plane_state->global_alpha = plane_info.global_alpha;
5042         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5043         dc_plane_state->dcc = plane_info.dcc;
5044         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
5045         dc_plane_state->flip_int_enabled = true;
5046
5047         /*
5048          * Always set input transfer function, since plane state is refreshed
5049          * every time.
5050          */
5051         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5052         if (ret)
5053                 return ret;
5054
5055         return 0;
5056 }
5057
5058 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5059                                            const struct dm_connector_state *dm_state,
5060                                            struct dc_stream_state *stream)
5061 {
5062         enum amdgpu_rmx_type rmx_type;
5063
5064         struct rect src = { 0 }; /* viewport in composition space */
5065         struct rect dst = { 0 }; /* stream addressable area */
5066
5067         /* no mode. nothing to be done */
5068         if (!mode)
5069                 return;
5070
5071         /* Full screen scaling by default */
5072         src.width = mode->hdisplay;
5073         src.height = mode->vdisplay;
5074         dst.width = stream->timing.h_addressable;
5075         dst.height = stream->timing.v_addressable;
5076
5077         if (dm_state) {
5078                 rmx_type = dm_state->scaling;
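                /*
                 * Preserve the aspect ratio: compare src and dst aspect by
                 * cross-multiplying (avoiding integer division), then shrink
                 * whichever destination dimension would otherwise overshoot.
                 */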
5079                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5080                         if (src.width * dst.height <
5081                                         src.height * dst.width) {
5082                                 /* height needs less upscaling/more downscaling */
5083                                 dst.width = src.width *
5084                                                 dst.height / src.height;
5085                         } else {
5086                                 /* width needs less upscaling/more downscaling */
5087                                 dst.height = src.height *
5088                                                 dst.width / src.width;
5089                         }
5090                 } else if (rmx_type == RMX_CENTER) {
5091                         dst = src;
5092                 }
5093
5094                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5095                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5096
5097                 if (dm_state->underscan_enable) {
5098                         dst.x += dm_state->underscan_hborder / 2;
5099                         dst.y += dm_state->underscan_vborder / 2;
5100                         dst.width -= dm_state->underscan_hborder;
5101                         dst.height -= dm_state->underscan_vborder;
5102                 }
5103         }
5104
5105         stream->src = src;
5106         stream->dst = dst;
5107
5108         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5109                       dst.x, dst.y, dst.width, dst.height);
5111 }
5112
5113 static enum dc_color_depth
5114 convert_color_depth_from_display_info(const struct drm_connector *connector,
5115                                       bool is_y420, int requested_bpc)
5116 {
5117         uint8_t bpc;
5118
5119         if (is_y420) {
5120                 bpc = 8;
5121
5122                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5123                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5124                         bpc = 16;
5125                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5126                         bpc = 12;
5127                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5128                         bpc = 10;
5129         } else {
5130                 bpc = (uint8_t)connector->display_info.bpc;
5131                 /* Assume 8 bpc by default if no bpc is specified. */
5132                 bpc = bpc ? bpc : 8;
5133         }
5134
5135         if (requested_bpc > 0) {
5136                 /*
5137                  * Cap display bpc based on the user requested value.
5138                  *
5139                  * The value for state->max_bpc may not be correctly updated
5140                  * depending on when the connector gets added to the state
5141                  * or if this was called outside of atomic check, so it
5142                  * can't be used directly.
5143                  */
5144                 bpc = min_t(u8, bpc, requested_bpc);
5145
5146                 /* Round down to the nearest even number. */
5147                 bpc = bpc - (bpc & 1);
5148         }
5149
5150         switch (bpc) {
5151         case 0:
5152                 /*
5153                  * Temporary workaround: DRM doesn't parse color depth for
5154                  * EDID revisions before 1.4.
5155                  * TODO: Fix EDID parsing.
5156                  */
5157                 return COLOR_DEPTH_888;
5158         case 6:
5159                 return COLOR_DEPTH_666;
5160         case 8:
5161                 return COLOR_DEPTH_888;
5162         case 10:
5163                 return COLOR_DEPTH_101010;
5164         case 12:
5165                 return COLOR_DEPTH_121212;
5166         case 14:
5167                 return COLOR_DEPTH_141414;
5168         case 16:
5169                 return COLOR_DEPTH_161616;
5170         default:
5171                 return COLOR_DEPTH_UNDEFINED;
5172         }
5173 }
5174
5175 static enum dc_aspect_ratio
5176 get_aspect_ratio(const struct drm_display_mode *mode_in)
5177 {
5178         /* 1-1 mapping, since both enums follow the HDMI spec. */
5179         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5180 }
5181
5182 static enum dc_color_space
5183 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5184 {
5185         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5186
5187         switch (dc_crtc_timing->pixel_encoding) {
5188         case PIXEL_ENCODING_YCBCR422:
5189         case PIXEL_ENCODING_YCBCR444:
5190         case PIXEL_ENCODING_YCBCR420:
5191         {
5192                 /*
5193                  * 27.03 MHz is the separation point between HDTV and SDTV
5194                  * according to the HDMI spec; we use YCbCr709 and YCbCr601,
5195                  * respectively.
5196                  */
5197                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5198                         if (dc_crtc_timing->flags.Y_ONLY)
5199                                 color_space =
5200                                         COLOR_SPACE_YCBCR709_LIMITED;
5201                         else
5202                                 color_space = COLOR_SPACE_YCBCR709;
5203                 } else {
5204                         if (dc_crtc_timing->flags.Y_ONLY)
5205                                 color_space =
5206                                         COLOR_SPACE_YCBCR601_LIMITED;
5207                         else
5208                                 color_space = COLOR_SPACE_YCBCR601;
5209                 }
5210
5211         }
5212         break;
5213         case PIXEL_ENCODING_RGB:
5214                 color_space = COLOR_SPACE_SRGB;
5215                 break;
5216
5217         default:
5218                 WARN_ON(1);
5219                 break;
5220         }
5221
5222         return color_space;
5223 }
5224
5225 static bool adjust_colour_depth_from_display_info(
5226         struct dc_crtc_timing *timing_out,
5227         const struct drm_display_info *info)
5228 {
5229         enum dc_color_depth depth = timing_out->display_color_depth;
5230         int normalized_clk;
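        /*
         * Walk the colour depth down from the requested value until the
         * bpp-scaled pixel clock fits within the sink's max TMDS clock.
         * The factors below are bpp ratios relative to the 24bpp
         * (8 bpc RGB) baseline, e.g. 30/24 for 10 bpc.
         */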
5231         do {
5232                 normalized_clk = timing_out->pix_clk_100hz / 10;
5233                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5234                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5235                         normalized_clk /= 2;
5236                 /* Adjust the pixel clock per the HDMI spec, based on colour depth. */
5237                 switch (depth) {
5238                 case COLOR_DEPTH_888:
5239                         break;
5240                 case COLOR_DEPTH_101010:
5241                         normalized_clk = (normalized_clk * 30) / 24;
5242                         break;
5243                 case COLOR_DEPTH_121212:
5244                         normalized_clk = (normalized_clk * 36) / 24;
5245                         break;
5246                 case COLOR_DEPTH_161616:
5247                         normalized_clk = (normalized_clk * 48) / 24;
5248                         break;
5249                 default:
5250                         /* The above depths are the only ones valid for HDMI. */
5251                         return false;
5252                 }
5253                 if (normalized_clk <= info->max_tmds_clock) {
5254                         timing_out->display_color_depth = depth;
5255                         return true;
5256                 }
5257         } while (--depth > COLOR_DEPTH_666);
5258         return false;
5259 }
5260
5261 static void fill_stream_properties_from_drm_display_mode(
5262         struct dc_stream_state *stream,
5263         const struct drm_display_mode *mode_in,
5264         const struct drm_connector *connector,
5265         const struct drm_connector_state *connector_state,
5266         const struct dc_stream_state *old_stream,
5267         int requested_bpc)
5268 {
5269         struct dc_crtc_timing *timing_out = &stream->timing;
5270         const struct drm_display_info *info = &connector->display_info;
5271         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5272         struct hdmi_vendor_infoframe hv_frame;
5273         struct hdmi_avi_infoframe avi_frame;
5274
5275         memset(&hv_frame, 0, sizeof(hv_frame));
5276         memset(&avi_frame, 0, sizeof(avi_frame));
5277
5278         timing_out->h_border_left = 0;
5279         timing_out->h_border_right = 0;
5280         timing_out->v_border_top = 0;
5281         timing_out->v_border_bottom = 0;
5282         /* TODO: un-hardcode */
5283         if (drm_mode_is_420_only(info, mode_in)
5284                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5285                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5286         else if (drm_mode_is_420_also(info, mode_in)
5287                         && aconnector->force_yuv420_output)
5288                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5289         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5290                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5291                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5292         else
5293                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5294
5295         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5296         timing_out->display_color_depth = convert_color_depth_from_display_info(
5297                 connector,
5298                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5299                 requested_bpc);
5300         timing_out->scan_type = SCANNING_TYPE_NODATA;
5301         timing_out->hdmi_vic = 0;
5302
5303         if (old_stream) {
5304                 timing_out->vic = old_stream->timing.vic;
5305                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5306                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5307         } else {
5308                 timing_out->vic = drm_match_cea_mode(mode_in);
5309                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5310                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5311                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5312                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5313         }
5314
5315         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5316                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5317                 timing_out->vic = avi_frame.video_code;
5318                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5319                 timing_out->hdmi_vic = hv_frame.vic;
5320         }
5321
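        /*
         * For freesync video modes, use the base timings rather than the
         * crtc_* copies, presumably so the amdgpu-inserted variant mode's
         * timings are preserved as-is.
         */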
5322         if (is_freesync_video_mode(mode_in, aconnector)) {
5323                 timing_out->h_addressable = mode_in->hdisplay;
5324                 timing_out->h_total = mode_in->htotal;
5325                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5326                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5327                 timing_out->v_total = mode_in->vtotal;
5328                 timing_out->v_addressable = mode_in->vdisplay;
5329                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5330                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5331                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5332         } else {
5333                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5334                 timing_out->h_total = mode_in->crtc_htotal;
5335                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5336                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5337                 timing_out->v_total = mode_in->crtc_vtotal;
5338                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5339                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5340                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5341                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5342         }
5343
5344         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5345
5346         stream->output_color_space = get_output_color_space(timing_out);
5347
5348         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5349         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5350         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5351                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5352                     drm_mode_is_420_also(info, mode_in) &&
5353                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5354                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5355                         adjust_colour_depth_from_display_info(timing_out, info);
5356                 }
5357         }
5358 }
5359
5360 static void fill_audio_info(struct audio_info *audio_info,
5361                             const struct drm_connector *drm_connector,
5362                             const struct dc_sink *dc_sink)
5363 {
5364         int i = 0;
5365         int cea_revision = 0;
5366         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5367
5368         audio_info->manufacture_id = edid_caps->manufacturer_id;
5369         audio_info->product_id = edid_caps->product_id;
5370
5371         cea_revision = drm_connector->display_info.cea_rev;
5372
5373         strscpy(audio_info->display_name,
5374                 edid_caps->display_name,
5375                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5376
5377         if (cea_revision >= 3) {
5378                 audio_info->mode_count = edid_caps->audio_mode_count;
5379
5380                 for (i = 0; i < audio_info->mode_count; ++i) {
5381                         audio_info->modes[i].format_code =
5382                                         (enum audio_format_code)
5383                                         (edid_caps->audio_modes[i].format_code);
5384                         audio_info->modes[i].channel_count =
5385                                         edid_caps->audio_modes[i].channel_count;
5386                         audio_info->modes[i].sample_rates.all =
5387                                         edid_caps->audio_modes[i].sample_rate;
5388                         audio_info->modes[i].sample_size =
5389                                         edid_caps->audio_modes[i].sample_size;
5390                 }
5391         }
5392
5393         audio_info->flags.all = edid_caps->speaker_flags;
5394
5395         /* TODO: We only check for progressive mode; check for interlace mode too. */
5396         if (drm_connector->latency_present[0]) {
5397                 audio_info->video_latency = drm_connector->video_latency[0];
5398                 audio_info->audio_latency = drm_connector->audio_latency[0];
5399         }
5400
5401         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5403 }
5404
5405 static void
5406 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5407                                       struct drm_display_mode *dst_mode)
5408 {
5409         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5410         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5411         dst_mode->crtc_clock = src_mode->crtc_clock;
5412         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5413         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5414         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5415         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5416         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5417         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5418         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5419         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5420         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5421         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5422         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5423 }
5424
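/*
 * Patch the requested mode's CRTC timing with the native mode's timing
 * when scaling is enabled, or when the requested clock and totals
 * already match the native mode; otherwise leave the timing untouched.
 */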
5425 static void
5426 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5427                                         const struct drm_display_mode *native_mode,
5428                                         bool scale_enabled)
5429 {
5430         if (scale_enabled) {
5431                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5432         } else if (native_mode->clock == drm_mode->clock &&
5433                         native_mode->htotal == drm_mode->htotal &&
5434                         native_mode->vtotal == drm_mode->vtotal) {
5435                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5436         } else {
                /* No scaling and no amdgpu-inserted mode; no need to patch the timing. */
5438         }
5439 }
5440
5441 static struct dc_sink *
5442 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5443 {
5444         struct dc_sink_init_data sink_init_data = { 0 };
        struct dc_sink *sink = NULL;

        sink_init_data.link = aconnector->dc_link;
5447         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5448
5449         sink = dc_sink_create(&sink_init_data);
5450         if (!sink) {
5451                 DRM_ERROR("Failed to create sink!\n");
5452                 return NULL;
5453         }
5454         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5455
5456         return sink;
5457 }
5458
5459 static void set_multisync_trigger_params(
5460                 struct dc_stream_state *stream)
5461 {
5462         struct dc_stream_state *master = NULL;
5463
5464         if (stream->triggered_crtc_reset.enabled) {
5465                 master = stream->triggered_crtc_reset.event_source;
5466                 stream->triggered_crtc_reset.event =
5467                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5468                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5469                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5470         }
5471 }
5472
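/*
 * Elect the multisync-enabled stream with the highest refresh rate as
 * the master; every stream in the set then uses the master as the
 * event source for its triggered CRTC reset.
 */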
5473 static void set_master_stream(struct dc_stream_state *stream_set[],
5474                               int stream_count)
5475 {
5476         int j, highest_rfr = 0, master_stream = 0;
5477
5478         for (j = 0;  j < stream_count; j++) {
5479                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5480                         int refresh_rate = 0;
5481
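                        /*
                         * pix_clk_100hz is in units of 100 Hz, so multiply by
                         * 100 for Hz before dividing by the pixels per frame.
                         * E.g. CEA 1080p60: 1485000 * 100 / (2200 * 1125) = 60.
                         */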
                        refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
                                (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5484                         if (refresh_rate > highest_rfr) {
5485                                 highest_rfr = refresh_rate;
5486                                 master_stream = j;
5487                         }
5488                 }
5489         }
5490         for (j = 0;  j < stream_count; j++) {
5491                 if (stream_set[j])
5492                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5493         }
5494 }
5495
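/*
 * With two or more streams in the context, set up per-frame CRTC
 * master synchronization: pick a master stream, then arm each
 * multisync-enabled stream's triggered CRTC reset off the master's
 * vsync edge.
 */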
5496 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5497 {
5498         int i = 0;
5499         struct dc_stream_state *stream;
5500
5501         if (context->stream_count < 2)
5502                 return;
5503         for (i = 0; i < context->stream_count ; i++) {
5504                 if (!context->streams[i])
5505                         continue;
5506                 /*
5507                  * TODO: add a function to read AMD VSDB bits and set
5508                  * crtc_sync_master.multi_sync_enabled flag
5509                  * For now it's set to false
5510                  */
5511         }
5512
5513         set_master_stream(context->streams, context->stream_count);
5514
5515         for (i = 0; i < context->stream_count ; i++) {
5516                 stream = context->streams[i];
5517
5518                 if (!stream)
5519                         continue;
5520
5521                 set_multisync_trigger_params(stream);
5522         }
5523 }
5524
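/*
 * Return the mode with the highest refresh rate at the preferred
 * mode's resolution, scanning either the probed or the current mode
 * list. The result is cached in freesync_vid_base so repeated lookups
 * are cheap.
 */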
5525 static struct drm_display_mode *
5526 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5527                           bool use_probed_modes)
5528 {
5529         struct drm_display_mode *m, *m_pref = NULL;
5530         u16 current_refresh, highest_refresh;
5531         struct list_head *list_head = use_probed_modes ?
5532                                                     &aconnector->base.probed_modes :
5533                                                     &aconnector->base.modes;
5534
5535         if (aconnector->freesync_vid_base.clock != 0)
5536                 return &aconnector->freesync_vid_base;
5537
5538         /* Find the preferred mode */
        list_for_each_entry(m, list_head, head) {
5540                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5541                         m_pref = m;
5542                         break;
5543                 }
5544         }
5545
5546         if (!m_pref) {
                /* Probably an EDID with no preferred mode. Fall back to the first entry. */
5548                 m_pref = list_first_entry_or_null(
5549                         &aconnector->base.modes, struct drm_display_mode, head);
5550                 if (!m_pref) {
5551                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5552                         return NULL;
5553                 }
5554         }
5555
5556         highest_refresh = drm_mode_vrefresh(m_pref);
5557
5558         /*
5559          * Find the mode with highest refresh rate with same resolution.
5560          * For some monitors, preferred mode is not the mode with highest
5561          * supported refresh rate.
5562          */
        list_for_each_entry(m, list_head, head) {
                current_refresh = drm_mode_vrefresh(m);
5565
5566                 if (m->hdisplay == m_pref->hdisplay &&
5567                     m->vdisplay == m_pref->vdisplay &&
5568                     highest_refresh < current_refresh) {
5569                         highest_refresh = current_refresh;
5570                         m_pref = m;
5571                 }
5572         }
5573
5574         aconnector->freesync_vid_base = *m_pref;
5575         return m_pref;
5576 }
5577
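/*
 * A mode qualifies as a freesync video mode when it differs from the
 * highest-refresh mode only in vertical blanking: same pixel clock and
 * horizontal timing, with vsync start/end shifted by exactly the
 * vtotal delta.
 */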
5578 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5579                                    struct amdgpu_dm_connector *aconnector)
5580 {
5581         struct drm_display_mode *high_mode;
5582         int timing_diff;
5583
5584         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5585         if (!high_mode || !mode)
5586                 return false;
5587
5588         timing_diff = high_mode->vtotal - mode->vtotal;
5589
5590         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5591             high_mode->hdisplay != mode->hdisplay ||
5592             high_mode->vdisplay != mode->vdisplay ||
5593             high_mode->hsync_start != mode->hsync_start ||
5594             high_mode->hsync_end != mode->hsync_end ||
5595             high_mode->htotal != mode->htotal ||
5596             high_mode->hskew != mode->hskew ||
5597             high_mode->vscan != mode->vscan ||
5598             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5599             high_mode->vsync_end - mode->vsync_end != timing_diff)
                return false;

        return true;
5603 }
5604
5605 static struct dc_stream_state *
5606 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5607                        const struct drm_display_mode *drm_mode,
5608                        const struct dm_connector_state *dm_state,
5609                        const struct dc_stream_state *old_stream,
5610                        int requested_bpc)
5611 {
5612         struct drm_display_mode *preferred_mode = NULL;
5613         struct drm_connector *drm_connector;
5614         const struct drm_connector_state *con_state =
5615                 dm_state ? &dm_state->base : NULL;
5616         struct dc_stream_state *stream = NULL;
5617         struct drm_display_mode mode = *drm_mode;
5618         struct drm_display_mode saved_mode;
5619         struct drm_display_mode *freesync_mode = NULL;
5620         bool native_mode_found = false;
5621         bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5622         int mode_refresh;
5623         int preferred_refresh = 0;
5624 #if defined(CONFIG_DRM_AMD_DC_DCN)
5625         struct dsc_dec_dpcd_caps dsc_caps;
5626         uint32_t link_bandwidth_kbps;
5627 #endif
5628         struct dc_sink *sink = NULL;
5629
5630         memset(&saved_mode, 0, sizeof(saved_mode));
5631
5632         if (aconnector == NULL) {
5633                 DRM_ERROR("aconnector is NULL!\n");
5634                 return stream;
5635         }
5636
5637         drm_connector = &aconnector->base;
5638
5639         if (!aconnector->dc_sink) {
5640                 sink = create_fake_sink(aconnector);
5641                 if (!sink)
5642                         return stream;
5643         } else {
5644                 sink = aconnector->dc_sink;
5645                 dc_sink_retain(sink);
5646         }
5647
5648         stream = dc_create_stream_for_sink(sink);
5649
5650         if (stream == NULL) {
5651                 DRM_ERROR("Failed to create stream for sink!\n");
5652                 goto finish;
5653         }
5654
5655         stream->dm_stream_context = aconnector;
5656
5657         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5658                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5659
5660         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5661                 /* Search for preferred mode */
5662                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5663                         native_mode_found = true;
5664                         break;
5665                 }
5666         }
5667         if (!native_mode_found)
5668                 preferred_mode = list_first_entry_or_null(
5669                                 &aconnector->base.modes,
5670                                 struct drm_display_mode,
5671                                 head);
5672
5673         mode_refresh = drm_mode_vrefresh(&mode);
5674
5675         if (preferred_mode == NULL) {
5676                 /*
5677                  * This may not be an error, the use case is when we have no
5678                  * usermode calls to reset and set mode upon hotplug. In this
5679                  * case, we call set mode ourselves to restore the previous mode
                 * and the mode list may not be populated in time.
5681                  */
5682                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5683         } else {
5684                 recalculate_timing |= amdgpu_freesync_vid_mode &&
5685                                  is_freesync_video_mode(&mode, aconnector);
5686                 if (recalculate_timing) {
5687                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5688                         saved_mode = mode;
5689                         mode = *freesync_mode;
5690                 } else {
5691                         decide_crtc_timing_for_drm_display_mode(
5692                                 &mode, preferred_mode,
5693                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5694                 }
5695
5696                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5697         }
5698
5699         if (recalculate_timing)
5700                 drm_mode_set_crtcinfo(&saved_mode, 0);
5701         else if (!dm_state)
5702                 drm_mode_set_crtcinfo(&mode, 0);
5703
        /*
         * If scaling is enabled and the refresh rate didn't change,
         * copy the VIC and polarities of the old timings.
         */
5708         if (!recalculate_timing || mode_refresh != preferred_refresh)
5709                 fill_stream_properties_from_drm_display_mode(
5710                         stream, &mode, &aconnector->base, con_state, NULL,
5711                         requested_bpc);
5712         else
5713                 fill_stream_properties_from_drm_display_mode(
5714                         stream, &mode, &aconnector->base, con_state, old_stream,
5715                         requested_bpc);
5716
5717         stream->timing.flags.DSC = 0;
5718
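        /*
         * DSC is only considered for DP sinks: parse the decoder's DPCD DSC
         * caps, compute a config that fits the link bandwidth, and honour
         * any debugfs force-enable/slice-count/bpp overrides.
         */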
5719         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5720 #if defined(CONFIG_DRM_AMD_DC_DCN)
5721                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5722                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5723                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5724                                       &dsc_caps);
5725                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5726                                                              dc_link_get_link_cap(aconnector->dc_link));
5727
5728                 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5729                         /* Set DSC policy according to dsc_clock_en */
5730                         dc_dsc_policy_set_enable_dsc_when_not_needed(
5731                                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5732
5733                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5734                                                   &dsc_caps,
5735                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5736                                                   0,
5737                                                   link_bandwidth_kbps,
5738                                                   &stream->timing,
5739                                                   &stream->timing.dsc_cfg))
5740                                 stream->timing.flags.DSC = 1;
5741                         /* Overwrite the stream flag if DSC is enabled through debugfs */
5742                         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5743                                 stream->timing.flags.DSC = 1;
5744
5745                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5746                                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5747
5748                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5749                                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5750
5751                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5752                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5753                 }
5754 #endif
5755         }
5756
5757         update_stream_scaling_settings(&mode, dm_state, stream);
5758
5759         fill_audio_info(
5760                 &stream->audio_info,
5761                 drm_connector,
5762                 sink);
5763
5764         update_stream_signal(stream, sink);
5765
5766         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5767                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5768
5769         if (stream->link->psr_settings.psr_feature_enabled) {
                /*
                 * Decide whether the stream supports VSC SDP colorimetry
                 * before building the VSC info packet.
                 */
5774                 stream->use_vsc_sdp_for_colorimetry = false;
5775                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5776                         stream->use_vsc_sdp_for_colorimetry =
5777                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5778                 } else {
5779                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5780                                 stream->use_vsc_sdp_for_colorimetry = true;
5781                 }
5782                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5783         }
5784 finish:
5785         dc_sink_release(sink);
5786
5787         return stream;
5788 }
5789
5790 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5791 {
5792         drm_crtc_cleanup(crtc);
5793         kfree(crtc);
5794 }
5795
5796 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5797                                   struct drm_crtc_state *state)
5798 {
5799         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5800
        /* TODO: Destroy dc_stream objects when the stream object is flattened */
        if (cur->stream)
                dc_stream_release(cur->stream);

        __drm_atomic_helper_crtc_destroy_state(state);
5808
5809         kfree(state);
5810 }
5811
5812 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5813 {
5814         struct dm_crtc_state *state;
5815
5816         if (crtc->state)
5817                 dm_crtc_destroy_state(crtc, crtc->state);
5818
5819         state = kzalloc(sizeof(*state), GFP_KERNEL);
5820         if (WARN_ON(!state))
5821                 return;
5822
5823         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5824 }
5825
5826 static struct drm_crtc_state *
5827 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5828 {
5829         struct dm_crtc_state *state, *cur;
5830
        if (WARN_ON(!crtc->state))
                return NULL;

        cur = to_dm_crtc_state(crtc->state);
5835
5836         state = kzalloc(sizeof(*state), GFP_KERNEL);
5837         if (!state)
5838                 return NULL;
5839
5840         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5841
5842         if (cur->stream) {
5843                 state->stream = cur->stream;
5844                 dc_stream_retain(state->stream);
5845         }
5846
5847         state->active_planes = cur->active_planes;
5848         state->vrr_infopacket = cur->vrr_infopacket;
5849         state->abm_level = cur->abm_level;
5850         state->vrr_supported = cur->vrr_supported;
5851         state->freesync_config = cur->freesync_config;
5852         state->cm_has_degamma = cur->cm_has_degamma;
5853         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
        /* TODO: Duplicate the dc_stream after the stream object is flattened */
5855
5856         return &state->base;
5857 }
5858
5859 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5860 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5861 {
5862         crtc_debugfs_init(crtc);
5863
5864         return 0;
5865 }
5866 #endif
5867
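/*
 * Enable or disable the VUPDATE interrupt of the OTG backing this
 * CRTC. VUPDATE is only needed while VRR is active (see
 * dm_set_vblank()).
 */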
5868 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5869 {
5870         enum dc_irq_source irq_source;
5871         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5872         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5873         int rc;
5874
5875         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5876
5877         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5878
5879         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5880                       acrtc->crtc_id, enable ? "en" : "dis", rc);
5881         return rc;
5882 }
5883
5884 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5885 {
5886         enum dc_irq_source irq_source;
5887         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5888         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5889         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5890 #if defined(CONFIG_DRM_AMD_DC_DCN)
5891         struct amdgpu_display_manager *dm = &adev->dm;
5892         unsigned long flags;
5893 #endif
5894         int rc = 0;
5895
5896         if (enable) {
5897                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5898                 if (amdgpu_dm_vrr_active(acrtc_state))
5899                         rc = dm_set_vupdate_irq(crtc, true);
5900         } else {
5901                 /* vblank irq off -> vupdate irq off */
5902                 rc = dm_set_vupdate_irq(crtc, false);
5903         }
5904
5905         if (rc)
5906                 return rc;
5907
5908         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5909
5910         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5911                 return -EBUSY;
5912
5913         if (amdgpu_in_reset(adev))
5914                 return 0;
5915
5916 #if defined(CONFIG_DRM_AMD_DC_DCN)
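        /*
         * On DCN, publish the new vblank state under vblank_lock and let
         * the deferred worker (mall_work) apply it asynchronously.
         */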
5917         spin_lock_irqsave(&dm->vblank_lock, flags);
5918         dm->vblank_workqueue->dm = dm;
5919         dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5920         dm->vblank_workqueue->enable = enable;
5921         spin_unlock_irqrestore(&dm->vblank_lock, flags);
5922         schedule_work(&dm->vblank_workqueue->mall_work);
5923 #endif
5924
5925         return 0;
5926 }
5927
5928 static int dm_enable_vblank(struct drm_crtc *crtc)
5929 {
5930         return dm_set_vblank(crtc, true);
5931 }
5932
5933 static void dm_disable_vblank(struct drm_crtc *crtc)
5934 {
5935         dm_set_vblank(crtc, false);
5936 }
5937
/* Only the options currently available for the driver are implemented */
5939 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5940         .reset = dm_crtc_reset_state,
5941         .destroy = amdgpu_dm_crtc_destroy,
5942         .set_config = drm_atomic_helper_set_config,
5943         .page_flip = drm_atomic_helper_page_flip,
5944         .atomic_duplicate_state = dm_crtc_duplicate_state,
5945         .atomic_destroy_state = dm_crtc_destroy_state,
5946         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5947         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5948         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5949         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5950         .enable_vblank = dm_enable_vblank,
5951         .disable_vblank = dm_disable_vblank,
5952         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5953 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5954         .late_register = amdgpu_dm_crtc_late_register,
5955 #endif
5956 };
5957
5958 static enum drm_connector_status
5959 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5960 {
5961         bool connected;
5962         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5963
5964         /*
5965          * Notes:
5966          * 1. This interface is NOT called in context of HPD irq.
5967          * 2. This interface *is called* in context of user-mode ioctl. Which
5968          * makes it a bad place for *any* MST-related activity.
5969          */
5970
5971         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5972             !aconnector->fake_enable)
5973                 connected = (aconnector->dc_sink != NULL);
5974         else
5975                 connected = (aconnector->base.force == DRM_FORCE_ON);
5976
5977         update_subconnector_property(aconnector);
5978
5979         return (connected ? connector_status_connected :
5980                         connector_status_disconnected);
5981 }
5982
5983 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5984                                             struct drm_connector_state *connector_state,
5985                                             struct drm_property *property,
5986                                             uint64_t val)
5987 {
5988         struct drm_device *dev = connector->dev;
5989         struct amdgpu_device *adev = drm_to_adev(dev);
5990         struct dm_connector_state *dm_old_state =
5991                 to_dm_connector_state(connector->state);
5992         struct dm_connector_state *dm_new_state =
5993                 to_dm_connector_state(connector_state);
5994
5995         int ret = -EINVAL;
5996
5997         if (property == dev->mode_config.scaling_mode_property) {
5998                 enum amdgpu_rmx_type rmx_type;
5999
6000                 switch (val) {
6001                 case DRM_MODE_SCALE_CENTER:
6002                         rmx_type = RMX_CENTER;
6003                         break;
6004                 case DRM_MODE_SCALE_ASPECT:
6005                         rmx_type = RMX_ASPECT;
6006                         break;
6007                 case DRM_MODE_SCALE_FULLSCREEN:
6008                         rmx_type = RMX_FULL;
6009                         break;
6010                 case DRM_MODE_SCALE_NONE:
6011                 default:
6012                         rmx_type = RMX_OFF;
6013                         break;
6014                 }
6015
6016                 if (dm_old_state->scaling == rmx_type)
6017                         return 0;
6018
6019                 dm_new_state->scaling = rmx_type;
6020                 ret = 0;
6021         } else if (property == adev->mode_info.underscan_hborder_property) {
6022                 dm_new_state->underscan_hborder = val;
6023                 ret = 0;
6024         } else if (property == adev->mode_info.underscan_vborder_property) {
6025                 dm_new_state->underscan_vborder = val;
6026                 ret = 0;
6027         } else if (property == adev->mode_info.underscan_property) {
6028                 dm_new_state->underscan_enable = val;
6029                 ret = 0;
6030         } else if (property == adev->mode_info.abm_level_property) {
6031                 dm_new_state->abm_level = val;
6032                 ret = 0;
6033         }
6034
6035         return ret;
6036 }
6037
6038 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6039                                             const struct drm_connector_state *state,
6040                                             struct drm_property *property,
6041                                             uint64_t *val)
6042 {
6043         struct drm_device *dev = connector->dev;
6044         struct amdgpu_device *adev = drm_to_adev(dev);
6045         struct dm_connector_state *dm_state =
6046                 to_dm_connector_state(state);
6047         int ret = -EINVAL;
6048
6049         if (property == dev->mode_config.scaling_mode_property) {
6050                 switch (dm_state->scaling) {
6051                 case RMX_CENTER:
6052                         *val = DRM_MODE_SCALE_CENTER;
6053                         break;
6054                 case RMX_ASPECT:
6055                         *val = DRM_MODE_SCALE_ASPECT;
6056                         break;
6057                 case RMX_FULL:
6058                         *val = DRM_MODE_SCALE_FULLSCREEN;
6059                         break;
6060                 case RMX_OFF:
6061                 default:
6062                         *val = DRM_MODE_SCALE_NONE;
6063                         break;
6064                 }
6065                 ret = 0;
6066         } else if (property == adev->mode_info.underscan_hborder_property) {
6067                 *val = dm_state->underscan_hborder;
6068                 ret = 0;
6069         } else if (property == adev->mode_info.underscan_vborder_property) {
6070                 *val = dm_state->underscan_vborder;
6071                 ret = 0;
6072         } else if (property == adev->mode_info.underscan_property) {
6073                 *val = dm_state->underscan_enable;
6074                 ret = 0;
6075         } else if (property == adev->mode_info.abm_level_property) {
6076                 *val = dm_state->abm_level;
6077                 ret = 0;
6078         }
6079
6080         return ret;
6081 }
6082
6083 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6084 {
6085         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6086
6087         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6088 }
6089
6090 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6091 {
6092         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6093         const struct dc_link *link = aconnector->dc_link;
6094         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6095         struct amdgpu_display_manager *dm = &adev->dm;
6096
6097         /*
         * Call only if mst_mgr was initialized before, since it's not done
6099          * for all connector types.
6100          */
6101         if (aconnector->mst_mgr.dev)
6102                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6103
6104 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6105         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6106
6107         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6108             link->type != dc_connection_none &&
6109             dm->backlight_dev) {
6110                 backlight_device_unregister(dm->backlight_dev);
6111                 dm->backlight_dev = NULL;
6112         }
6113 #endif
6114
6115         if (aconnector->dc_em_sink)
6116                 dc_sink_release(aconnector->dc_em_sink);
6117         aconnector->dc_em_sink = NULL;
6118         if (aconnector->dc_sink)
6119                 dc_sink_release(aconnector->dc_sink);
6120         aconnector->dc_sink = NULL;
6121
6122         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6123         drm_connector_unregister(connector);
6124         drm_connector_cleanup(connector);
6125         if (aconnector->i2c) {
6126                 i2c_del_adapter(&aconnector->i2c->base);
6127                 kfree(aconnector->i2c);
6128         }
6129         kfree(aconnector->dm_dp_aux.aux.name);
6130
6131         kfree(connector);
6132 }
6133
6134 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6135 {
6136         struct dm_connector_state *state =
6137                 to_dm_connector_state(connector->state);
6138
6139         if (connector->state)
6140                 __drm_atomic_helper_connector_destroy_state(connector->state);
6141
6142         kfree(state);
6143
6144         state = kzalloc(sizeof(*state), GFP_KERNEL);
6145
6146         if (state) {
6147                 state->scaling = RMX_OFF;
6148                 state->underscan_enable = false;
6149                 state->underscan_hborder = 0;
6150                 state->underscan_vborder = 0;
6151                 state->base.max_requested_bpc = 8;
6152                 state->vcpi_slots = 0;
6153                 state->pbn = 0;
6154                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6155                         state->abm_level = amdgpu_dm_abm_level;
6156
6157                 __drm_atomic_helper_connector_reset(connector, &state->base);
6158         }
6159 }
6160
6161 struct drm_connector_state *
6162 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6163 {
6164         struct dm_connector_state *state =
6165                 to_dm_connector_state(connector->state);
6166
6167         struct dm_connector_state *new_state =
6168                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6169
6170         if (!new_state)
6171                 return NULL;
6172
6173         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6174
6175         new_state->freesync_capable = state->freesync_capable;
6176         new_state->abm_level = state->abm_level;
6177         new_state->scaling = state->scaling;
6178         new_state->underscan_enable = state->underscan_enable;
6179         new_state->underscan_hborder = state->underscan_hborder;
6180         new_state->underscan_vborder = state->underscan_vborder;
6181         new_state->vcpi_slots = state->vcpi_slots;
6182         new_state->pbn = state->pbn;
6183         return &new_state->base;
6184 }
6185
6186 static int
6187 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6188 {
6189         struct amdgpu_dm_connector *amdgpu_dm_connector =
6190                 to_amdgpu_dm_connector(connector);
6191         int r;
6192
6193         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6194             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6195                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6196                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6197                 if (r)
6198                         return r;
6199         }
6200
6201 #if defined(CONFIG_DEBUG_FS)
6202         connector_debugfs_init(amdgpu_dm_connector);
6203 #endif
6204
6205         return 0;
6206 }
6207
6208 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6209         .reset = amdgpu_dm_connector_funcs_reset,
6210         .detect = amdgpu_dm_connector_detect,
6211         .fill_modes = drm_helper_probe_single_connector_modes,
6212         .destroy = amdgpu_dm_connector_destroy,
6213         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6214         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6215         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6216         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6217         .late_register = amdgpu_dm_connector_late_register,
6218         .early_unregister = amdgpu_dm_connector_unregister
6219 };
6220
6221 static int get_modes(struct drm_connector *connector)
6222 {
6223         return amdgpu_dm_connector_get_modes(connector);
6224 }
6225
6226 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6227 {
6228         struct dc_sink_init_data init_params = {
6229                         .link = aconnector->dc_link,
6230                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6231         };
6232         struct edid *edid;
6233
6234         if (!aconnector->base.edid_blob_ptr) {
                DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6236                                 aconnector->base.name);
6237
6238                 aconnector->base.force = DRM_FORCE_OFF;
6239                 aconnector->base.override_edid = false;
6240                 return;
6241         }
6242
6243         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6244
6245         aconnector->edid = edid;
6246
6247         aconnector->dc_em_sink = dc_link_add_remote_sink(
6248                 aconnector->dc_link,
6249                 (uint8_t *)edid,
6250                 (edid->extensions + 1) * EDID_LENGTH,
6251                 &init_params);
6252
6253         if (aconnector->base.force == DRM_FORCE_ON) {
                aconnector->dc_sink = aconnector->dc_link->local_sink ?
                        aconnector->dc_link->local_sink :
                        aconnector->dc_em_sink;
6257                 dc_sink_retain(aconnector->dc_sink);
6258         }
6259 }
6260
6261 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6262 {
6263         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6264
        /*
         * In case of a headless boot with force on for a DP managed connector,
         * these settings have to be != 0 to get an initial modeset.
         */
        if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
                link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
                link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
        }
6274
6275         aconnector->base.override_edid = true;
6276         create_eml_sink(aconnector);
6277 }
6278
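/*
 * Create a stream and run it through DC validation, lowering the
 * requested bpc by 2 (down to a floor of 6) after each failure. If the
 * encoder rejects the stream outright, retry once more while forcing
 * YCbCr 4:2:0 output.
 */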
6279 static struct dc_stream_state *
6280 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6281                                 const struct drm_display_mode *drm_mode,
6282                                 const struct dm_connector_state *dm_state,
6283                                 const struct dc_stream_state *old_stream)
6284 {
6285         struct drm_connector *connector = &aconnector->base;
6286         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6287         struct dc_stream_state *stream;
6288         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6289         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6290         enum dc_status dc_result = DC_OK;
6291
6292         do {
6293                 stream = create_stream_for_sink(aconnector, drm_mode,
6294                                                 dm_state, old_stream,
6295                                                 requested_bpc);
6296                 if (stream == NULL) {
6297                         DRM_ERROR("Failed to create stream for sink!\n");
6298                         break;
6299                 }
6300
6301                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6302
6303                 if (dc_result != DC_OK) {
6304                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6305                                       drm_mode->hdisplay,
6306                                       drm_mode->vdisplay,
6307                                       drm_mode->clock,
6308                                       dc_result,
6309                                       dc_status_to_str(dc_result));
6310
6311                         dc_stream_release(stream);
6312                         stream = NULL;
6313                         requested_bpc -= 2; /* lower bpc to retry validation */
6314                 }
6315
6316         } while (stream == NULL && requested_bpc >= 6);
6317
6318         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6319                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6320
6321                 aconnector->force_yuv420_output = true;
6322                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6323                                                 dm_state, old_stream);
6324                 aconnector->force_yuv420_output = false;
6325         }
6326
6327         return stream;
6328 }
6329
6330 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6331                                    struct drm_display_mode *mode)
6332 {
6333         int result = MODE_ERROR;
6334         struct dc_sink *dc_sink;
6335         /* TODO: Unhardcode stream count */
6336         struct dc_stream_state *stream;
6337         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6338
6339         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6340                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6341                 return result;
6342
6343         /*
         * Only run this the first time mode_valid is called, to initialize
         * EDID management.
6346          */
6347         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6348                 !aconnector->dc_em_sink)
6349                 handle_edid_mgmt(aconnector);
6350
6351         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6352
6353         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6354                                 aconnector->base.force != DRM_FORCE_ON) {
6355                 DRM_ERROR("dc_sink is NULL!\n");
6356                 goto fail;
6357         }
6358
6359         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6360         if (stream) {
6361                 dc_stream_release(stream);
6362                 result = MODE_OK;
6363         }
6364
6365 fail:
        /* TODO: error handling */
6367         return result;
6368 }
6369
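/*
 * Pack the connector's hdr_output_metadata into a Dynamic Range and
 * Mastering infoframe (a fixed 4-byte header plus 26-byte payload) and
 * wrap it as an HDMI infopacket or a DP SDP, depending on the
 * connector type.
 */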
6370 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6371                                 struct dc_info_packet *out)
6372 {
6373         struct hdmi_drm_infoframe frame;
6374         unsigned char buf[30]; /* 26 + 4 */
6375         ssize_t len;
6376         int ret, i;
6377
6378         memset(out, 0, sizeof(*out));
6379
6380         if (!state->hdr_output_metadata)
6381                 return 0;
6382
6383         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6384         if (ret)
6385                 return ret;
6386
6387         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6388         if (len < 0)
6389                 return (int)len;
6390
6391         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6392         if (len != 30)
6393                 return -EINVAL;
6394
6395         /* Prepare the infopacket for DC. */
6396         switch (state->connector->connector_type) {
6397         case DRM_MODE_CONNECTOR_HDMIA:
6398                 out->hb0 = 0x87; /* type */
6399                 out->hb1 = 0x01; /* version */
6400                 out->hb2 = 0x1A; /* length */
6401                 out->sb[0] = buf[3]; /* checksum */
6402                 i = 1;
6403                 break;
6404
6405         case DRM_MODE_CONNECTOR_DisplayPort:
6406         case DRM_MODE_CONNECTOR_eDP:
6407                 out->hb0 = 0x00; /* sdp id, zero */
6408                 out->hb1 = 0x87; /* type */
6409                 out->hb2 = 0x1D; /* payload len - 1 */
6410                 out->hb3 = (0x13 << 2); /* sdp version */
6411                 out->sb[0] = 0x01; /* version */
6412                 out->sb[1] = 0x1A; /* length */
6413                 i = 2;
6414                 break;
6415
6416         default:
6417                 return -EINVAL;
6418         }
6419
6420         memcpy(&out->sb[i], &buf[4], 26);
6421         out->valid = true;
6422
6423         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6424                        sizeof(out->sb), false);
6425
6426         return 0;
6427 }
6428
6429 static int
6430 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6431                                  struct drm_atomic_state *state)
6432 {
6433         struct drm_connector_state *new_con_state =
6434                 drm_atomic_get_new_connector_state(state, conn);
6435         struct drm_connector_state *old_con_state =
6436                 drm_atomic_get_old_connector_state(state, conn);
6437         struct drm_crtc *crtc = new_con_state->crtc;
6438         struct drm_crtc_state *new_crtc_state;
6439         int ret;
6440
6441         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6442
6443         if (!crtc)
6444                 return 0;
6445
6446         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6447                 struct dc_info_packet hdr_infopacket;
6448
6449                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6450                 if (ret)
6451                         return ret;
6452
6453                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6454                 if (IS_ERR(new_crtc_state))
6455                         return PTR_ERR(new_crtc_state);
6456
6457                 /*
6458                  * DC considers the stream backends changed if the
6459                  * static metadata changes. Forcing the modeset also
6460                  * gives a simple way for userspace to switch from
6461                  * 8bpc to 10bpc when setting the metadata to enter
6462                  * or exit HDR.
6463                  *
6464                  * Changing the static metadata after it's been
6465                  * set is permissible, however. So only force a
6466                  * modeset if we're entering or exiting HDR.
6467                  */
6468                 new_crtc_state->mode_changed =
6469                         !old_con_state->hdr_output_metadata ||
6470                         !new_con_state->hdr_output_metadata;
6471         }
6472
6473         return 0;
6474 }
6475
6476 static const struct drm_connector_helper_funcs
6477 amdgpu_dm_connector_helper_funcs = {
6478         /*
         * If a second, bigger display is hotplugged in fbcon mode, its higher
         * resolution modes will be filtered out by drm_mode_validate_size() and
         * will be missing after the user starts lightdm. So we need to renew the
         * mode list in the get_modes callback, not just return the mode count.
6483          */
6484         .get_modes = get_modes,
6485         .mode_valid = amdgpu_dm_connector_mode_valid,
6486         .atomic_check = amdgpu_dm_connector_atomic_check,
6487 };
6488
6489 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6490 {
6491 }
6492
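/*
 * Count the non-cursor planes that will be active on this CRTC: a
 * plane without a new state is assumed to stay enabled, otherwise it
 * only counts when a framebuffer is attached.
 */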
6493 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6494 {
6495         struct drm_atomic_state *state = new_crtc_state->state;
6496         struct drm_plane *plane;
6497         int num_active = 0;
6498
6499         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6500                 struct drm_plane_state *new_plane_state;
6501
6502                 /* Cursor planes are "fake". */
6503                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6504                         continue;
6505
6506                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6507
6508                 if (!new_plane_state) {
6509                         /*
                         * The plane is enabled on the CRTC and hasn't changed
6511                          * state. This means that it previously passed
6512                          * validation and is therefore enabled.
6513                          */
6514                         num_active += 1;
6515                         continue;
6516                 }
6517
6518                 /* We need a framebuffer to be considered enabled. */
6519                 num_active += (new_plane_state->fb != NULL);
6520         }
6521
6522         return num_active;
6523 }
6524
6525 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6526                                          struct drm_crtc_state *new_crtc_state)
6527 {
6528         struct dm_crtc_state *dm_new_crtc_state =
6529                 to_dm_crtc_state(new_crtc_state);
6530
6531         dm_new_crtc_state->active_planes = 0;
6532
6533         if (!dm_new_crtc_state->stream)
6534                 return;
6535
6536         dm_new_crtc_state->active_planes =
6537                 count_crtc_active_planes(new_crtc_state);
6538 }
6539
6540 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6541                                        struct drm_atomic_state *state)
6542 {
6543         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6544                                                                           crtc);
6545         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6546         struct dc *dc = adev->dm.dc;
6547         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6548         int ret = -EINVAL;
6549
6550         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6551
6552         dm_update_crtc_active_planes(crtc, crtc_state);
6553
6554         if (unlikely(!dm_crtc_state->stream &&
6555                      modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6556                 WARN_ON(1);
6557                 return ret;
6558         }
6559
6560         /*
6561          * We require the primary plane to be enabled whenever the CRTC is, otherwise
6562          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6563          * planes are disabled, which is not supported by the hardware. And there is legacy
6564          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6565          */
6566         if (crtc_state->enable &&
6567             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6568                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6569                 return -EINVAL;
6570         }
6571
6572         /* In some use cases, like reset, no stream is attached */
6573         if (!dm_crtc_state->stream)
6574                 return 0;
6575
6576         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6577                 return 0;
6578
6579         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6580         return ret;
6581 }
6582
6583 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6584                                       const struct drm_display_mode *mode,
6585                                       struct drm_display_mode *adjusted_mode)
6586 {
6587         return true;
6588 }
6589
6590 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6591         .disable = dm_crtc_helper_disable,
6592         .atomic_check = dm_crtc_helper_atomic_check,
6593         .mode_fixup = dm_crtc_helper_mode_fixup,
6594         .get_scanout_position = amdgpu_crtc_get_scanout_position,
6595 };
6596
6597 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6598 {
}
6601
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
        switch (display_color_depth) {
        case COLOR_DEPTH_666:
                return 6;
        case COLOR_DEPTH_888:
                return 8;
        case COLOR_DEPTH_101010:
                return 10;
        case COLOR_DEPTH_121212:
                return 12;
        case COLOR_DEPTH_141414:
                return 14;
        case COLOR_DEPTH_161616:
                return 16;
        default:
                break;
        }
        return 0;
}
6622
6623 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6624                                           struct drm_crtc_state *crtc_state,
6625                                           struct drm_connector_state *conn_state)
6626 {
6627         struct drm_atomic_state *state = crtc_state->state;
6628         struct drm_connector *connector = conn_state->connector;
6629         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6630         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6631         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6632         struct drm_dp_mst_topology_mgr *mst_mgr;
6633         struct drm_dp_mst_port *mst_port;
6634         enum dc_color_depth color_depth;
6635         int clock, bpp = 0;
6636         bool is_y420 = false;
6637
6638         if (!aconnector->port || !aconnector->dc_sink)
6639                 return 0;
6640
6641         mst_port = aconnector->port;
6642         mst_mgr = &aconnector->mst_port->mst_mgr;
6643
6644         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6645                 return 0;
6646
6647         if (!state->duplicated) {
                int max_bpc = conn_state->max_requested_bpc;

                is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6650                                 aconnector->force_yuv420_output;
6651                 color_depth = convert_color_depth_from_display_info(connector,
6652                                                                     is_y420,
6653                                                                     max_bpc);
6654                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6655                 clock = adjusted_mode->clock;
6656                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6657         }
6658         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6659                                                                            mst_mgr,
6660                                                                            mst_port,
6661                                                                            dm_new_connector_state->pbn,
6662                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
6663         if (dm_new_connector_state->vcpi_slots < 0) {
6664                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6665                 return dm_new_connector_state->vcpi_slots;
6666         }
6667         return 0;
6668 }
6669
6670 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6671         .disable = dm_encoder_helper_disable,
6672         .atomic_check = dm_encoder_helper_atomic_check
6673 };
6674
6675 #if defined(CONFIG_DRM_AMD_DC_DCN)
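/*
 * For every MST connector in the atomic state, match it to its DC
 * stream and renegotiate VCPI slots: DSC-enabled streams recompute
 * their PBN from the DSC target bits-per-pixel, while the rest keep
 * the PBN computed in dm_encoder_helper_atomic_check().
 */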
6676 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6677                                             struct dc_state *dc_state)
6678 {
6679         struct dc_stream_state *stream = NULL;
6680         struct drm_connector *connector;
6681         struct drm_connector_state *new_con_state;
6682         struct amdgpu_dm_connector *aconnector;
6683         struct dm_connector_state *dm_conn_state;
6684         int i, j, clock, bpp;
6685         int vcpi, pbn_div, pbn = 0;
6686
6687         for_each_new_connector_in_state(state, connector, new_con_state, i) {
6688
6689                 aconnector = to_amdgpu_dm_connector(connector);
6690
6691                 if (!aconnector->port)
6692                         continue;
6693
6694                 if (!new_con_state || !new_con_state->crtc)
6695                         continue;
6696
6697                 dm_conn_state = to_dm_connector_state(new_con_state);
6698
6699                 for (j = 0; j < dc_state->stream_count; j++) {
6700                         stream = dc_state->streams[j];
6701                         if (!stream)
6702                                 continue;
6703
                        if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6705                                 break;
6706
6707                         stream = NULL;
6708                 }
6709
6710                 if (!stream)
6711                         continue;
6712
6713                 if (stream->timing.flags.DSC != 1) {
6714                         drm_dp_mst_atomic_enable_dsc(state,
6715                                                      aconnector->port,
6716                                                      dm_conn_state->pbn,
6717                                                      0,
6718                                                      false);
6719                         continue;
6720                 }
6721
6722                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6723                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6724                 clock = stream->timing.pix_clk_100hz / 10;
6725                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6726                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6727                                                     aconnector->port,
6728                                                     pbn, pbn_div,
6729                                                     true);
6730                 if (vcpi < 0)
6731                         return vcpi;
6732
6733                 dm_conn_state->pbn = pbn;
6734                 dm_conn_state->vcpi_slots = vcpi;
6735         }
6736         return 0;
6737 }
6738 #endif

static void dm_drm_plane_reset(struct drm_plane *plane)
{
        struct dm_plane_state *amdgpu_state = NULL;

        if (plane->state)
                plane->funcs->atomic_destroy_state(plane, plane->state);

        amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
        WARN_ON(amdgpu_state == NULL);

        if (amdgpu_state)
                __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
}

static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
        struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

        old_dm_plane_state = to_dm_plane_state(plane->state);
        dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
        if (!dm_plane_state)
                return NULL;

        __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

        if (old_dm_plane_state->dc_state) {
                dm_plane_state->dc_state = old_dm_plane_state->dc_state;
                dc_plane_state_retain(dm_plane_state->dc_state);
        }

        return &dm_plane_state->base;
}

static void dm_drm_plane_destroy_state(struct drm_plane *plane,
                                struct drm_plane_state *state)
{
        struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

        if (dm_plane_state->dc_state)
                dc_plane_state_release(dm_plane_state->dc_state);

        drm_atomic_helper_plane_destroy_state(plane, state);
}

static const struct drm_plane_funcs dm_plane_funcs = {
        .update_plane   = drm_atomic_helper_update_plane,
        .disable_plane  = drm_atomic_helper_disable_plane,
        .destroy        = drm_primary_helper_destroy,
        .reset = dm_drm_plane_reset,
        .atomic_duplicate_state = dm_drm_plane_duplicate_state,
        .atomic_destroy_state = dm_drm_plane_destroy_state,
        .format_mod_supported = dm_plane_format_mod_supported,
};

static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
                                      struct drm_plane_state *new_state)
{
        struct amdgpu_framebuffer *afb;
        struct drm_gem_object *obj;
        struct amdgpu_device *adev;
        struct amdgpu_bo *rbo;
        struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
        struct list_head list;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        uint32_t domain;
        int r;

        if (!new_state->fb) {
                DRM_DEBUG_KMS("No FB bound\n");
                return 0;
        }

        afb = to_amdgpu_framebuffer(new_state->fb);
        obj = new_state->fb->obj[0];
        rbo = gem_to_amdgpu_bo(obj);
        adev = amdgpu_ttm_adev(rbo->tbo.bdev);
        INIT_LIST_HEAD(&list);

        tv.bo = &rbo->tbo;
        tv.num_shared = 1;
        list_add(&tv.head, &list);

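        /*
         * Reserve the BO first, then pin it in a domain the display hardware
         * can scan out from, and make sure a GART binding exists so that the
         * GPU address taken below remains valid while the fb is in use.
         */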
        r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
        if (r) {
                dev_err(adev->dev, "failed to reserve buffer object (%d)\n", r);
                return r;
        }

        if (plane->type != DRM_PLANE_TYPE_CURSOR)
                domain = amdgpu_display_supported_domains(adev, rbo->flags);
        else
                domain = AMDGPU_GEM_DOMAIN_VRAM;

        r = amdgpu_bo_pin(rbo, domain);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
                ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }

        r = amdgpu_ttm_alloc_gart(&rbo->tbo);
        if (unlikely(r != 0)) {
                amdgpu_bo_unpin(rbo);
                ttm_eu_backoff_reservation(&ticket, &list);
                DRM_ERROR("%p bind failed\n", rbo);
                return r;
        }

        ttm_eu_backoff_reservation(&ticket, &list);

        afb->address = amdgpu_bo_gpu_offset(rbo);

        amdgpu_bo_ref(rbo);

        /*
         * We don't do surface updates on planes that have been newly created,
         * but we also don't have the afb->address during atomic check.
         *
         * Fill in buffer attributes depending on the address here, but only on
         * newly created planes since they're not being used by DC yet and this
         * won't modify global state.
         */
        dm_plane_state_old = to_dm_plane_state(plane->state);
        dm_plane_state_new = to_dm_plane_state(new_state);

        if (dm_plane_state_new->dc_state &&
            dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
                struct dc_plane_state *plane_state =
                        dm_plane_state_new->dc_state;
                bool force_disable_dcc = !plane_state->dcc.enable;

                fill_plane_buffer_attributes(
                        adev, afb, plane_state->format, plane_state->rotation,
                        afb->tiling_flags,
                        &plane_state->tiling_info, &plane_state->plane_size,
                        &plane_state->dcc, &plane_state->address,
                        afb->tmz_surface, force_disable_dcc);
        }

        return 0;
}

static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
                                       struct drm_plane_state *old_state)
{
        struct amdgpu_bo *rbo;
        int r;

        if (!old_state->fb)
                return;

        rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
        r = amdgpu_bo_reserve(rbo, false);
        if (unlikely(r)) {
                DRM_ERROR("failed to reserve rbo before unpin\n");
                return;
        }

        amdgpu_bo_unpin(rbo);
        amdgpu_bo_unreserve(rbo);
        amdgpu_bo_unref(&rbo);
}

static int dm_plane_helper_check_state(struct drm_plane_state *state,
                                       struct drm_crtc_state *new_crtc_state)
{
        struct drm_framebuffer *fb = state->fb;
        int min_downscale, max_upscale;
        int min_scale = 0;
        int max_scale = INT_MAX;

        /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
        if (fb && state->crtc) {
                /* Validate viewport to cover the case when only the position changes */
                if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
                        int viewport_width = state->crtc_w;
                        int viewport_height = state->crtc_h;

                        if (state->crtc_x < 0)
                                viewport_width += state->crtc_x;
                        else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
                                viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;

                        if (state->crtc_y < 0)
                                viewport_height += state->crtc_y;
                        else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
                                viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;

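                        /*
                         * Worked example (illustrative numbers): crtc_x = -100
                         * with crtc_w = 300 leaves viewport_width = 200, i.e.
                         * only the on-screen part of the plane counts toward
                         * the minimum-viewport checks below.
                         */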
                        if (viewport_width < 0 || viewport_height < 0) {
                                DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
                                return -EINVAL;
                        } else if (viewport_width < MIN_VIEWPORT_SIZE * 2) { /* x2 for width because of pipe split */
                                DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE * 2);
                                return -EINVAL;
                        } else if (viewport_height < MIN_VIEWPORT_SIZE) {
                                DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
                                return -EINVAL;
                        }
                }

                /* Get min/max allowed scaling factors from plane caps. */
                get_min_max_dc_plane_scaling(state->crtc->dev, fb,
                                             &min_downscale, &max_upscale);
                /*
                 * Convert to drm convention: 16.16 fixed point, instead of dc's
                 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
                 * dst/src, so min_scale = 1.0 / max_upscale, etc.
                 */
                min_scale = (1000 << 16) / max_upscale;
                max_scale = (1000 << 16) / min_downscale;
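                /*
                 * Worked example (illustrative caps): max_upscale = 16000
                 * (16x in dc's 1.0 == 1000 convention) gives min_scale =
                 * (1000 << 16) / 16000 = 0x1000, i.e. 1/16 in 16.16 fixed
                 * point; min_downscale = 250 (1/4x) gives max_scale =
                 * (1000 << 16) / 250 = 0x40000, i.e. 4.0.
                 */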
        }

        return drm_atomic_helper_check_plane_state(
                state, new_crtc_state, min_scale, max_scale, true, true);
}

static int dm_plane_atomic_check(struct drm_plane *plane,
                                 struct drm_atomic_state *state)
{
        struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
                                                                                 plane);
        struct amdgpu_device *adev = drm_to_adev(plane->dev);
        struct dc *dc = adev->dm.dc;
        struct dm_plane_state *dm_plane_state;
        struct dc_scaling_info scaling_info;
        struct drm_crtc_state *new_crtc_state;
        int ret;

        trace_amdgpu_dm_plane_atomic_check(new_plane_state);

        dm_plane_state = to_dm_plane_state(new_plane_state);

        if (!dm_plane_state->dc_state)
                return 0;

        new_crtc_state =
                drm_atomic_get_new_crtc_state(state,
                                              new_plane_state->crtc);
        if (!new_crtc_state)
                return -EINVAL;

        ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
        if (ret)
                return ret;

        ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
        if (ret)
                return ret;

        if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
                return 0;

        return -EINVAL;
}

static int dm_plane_atomic_async_check(struct drm_plane *plane,
                                       struct drm_atomic_state *state)
{
        /* Only support async updates on cursor planes. */
        if (plane->type != DRM_PLANE_TYPE_CURSOR)
                return -EINVAL;

        return 0;
}

static void dm_plane_atomic_async_update(struct drm_plane *plane,
                                         struct drm_atomic_state *state)
{
        struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
                                                                           plane);
        struct drm_plane_state *old_state =
                drm_atomic_get_old_plane_state(state, plane);

        trace_amdgpu_dm_atomic_update_cursor(new_state);

        swap(plane->state->fb, new_state->fb);

        plane->state->src_x = new_state->src_x;
        plane->state->src_y = new_state->src_y;
        plane->state->src_w = new_state->src_w;
        plane->state->src_h = new_state->src_h;
        plane->state->crtc_x = new_state->crtc_x;
        plane->state->crtc_y = new_state->crtc_y;
        plane->state->crtc_w = new_state->crtc_w;
        plane->state->crtc_h = new_state->crtc_h;

        handle_cursor_update(plane, old_state);
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
        .prepare_fb = dm_plane_helper_prepare_fb,
        .cleanup_fb = dm_plane_helper_cleanup_fb,
        .atomic_check = dm_plane_atomic_check,
        .atomic_async_check = dm_plane_atomic_async_check,
        .atomic_async_update = dm_plane_atomic_async_update
};

/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * drm check will succeed, and let DC implement the proper check.
 */
static const uint32_t rgb_formats[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_RGBA8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
        DRM_FORMAT_ARGB2101010,
        DRM_FORMAT_ABGR2101010,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_ABGR8888,
        DRM_FORMAT_RGB565,
};

static const uint32_t overlay_formats[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_RGBA8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_ABGR8888,
        DRM_FORMAT_RGB565
};

static const u32 cursor_formats[] = {
        DRM_FORMAT_ARGB8888
};

static int get_plane_formats(const struct drm_plane *plane,
                             const struct dc_plane_cap *plane_cap,
                             uint32_t *formats, int max_formats)
{
        int i, num_formats = 0;

        /*
         * TODO: Query support for each group of formats directly from
         * DC plane caps. This will require adding more formats to the
         * caps list.
         */

        switch (plane->type) {
        case DRM_PLANE_TYPE_PRIMARY:
                for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
                        if (num_formats >= max_formats)
                                break;

                        formats[num_formats++] = rgb_formats[i];
                }

                if (plane_cap && plane_cap->pixel_format_support.nv12)
                        formats[num_formats++] = DRM_FORMAT_NV12;
                if (plane_cap && plane_cap->pixel_format_support.p010)
                        formats[num_formats++] = DRM_FORMAT_P010;
                if (plane_cap && plane_cap->pixel_format_support.fp16) {
                        formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
                        formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
                        formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
                        formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
                }
                break;

        case DRM_PLANE_TYPE_OVERLAY:
                for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
                        if (num_formats >= max_formats)
                                break;

                        formats[num_formats++] = overlay_formats[i];
                }
                break;

        case DRM_PLANE_TYPE_CURSOR:
                for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
                        if (num_formats >= max_formats)
                                break;

                        formats[num_formats++] = cursor_formats[i];
                }
                break;
        }

        return num_formats;
}

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap)
{
        uint32_t formats[32];
        int num_formats;
        int res = -EPERM;
        unsigned int supported_rotations;
        uint64_t *modifiers = NULL;

        num_formats = get_plane_formats(plane, plane_cap, formats,
                                        ARRAY_SIZE(formats));

        res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
        if (res)
                return res;

        res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
                                       &dm_plane_funcs, formats, num_formats,
                                       modifiers, plane->type, NULL);
        kfree(modifiers);
        if (res)
                return res;

        if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
            plane_cap && plane_cap->per_pixel_alpha) {
                unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
                                          BIT(DRM_MODE_BLEND_PREMULTI);

                drm_plane_create_alpha_property(plane);
                drm_plane_create_blend_mode_property(plane, blend_caps);
        }

        if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
            plane_cap &&
            (plane_cap->pixel_format_support.nv12 ||
             plane_cap->pixel_format_support.p010)) {
                /* This only affects YUV formats. */
                drm_plane_create_color_properties(
                        plane,
                        BIT(DRM_COLOR_YCBCR_BT601) |
                        BIT(DRM_COLOR_YCBCR_BT709) |
                        BIT(DRM_COLOR_YCBCR_BT2020),
                        BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
                        BIT(DRM_COLOR_YCBCR_FULL_RANGE),
                        DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
        }

        supported_rotations =
                DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
                DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

        if (dm->adev->asic_type >= CHIP_BONAIRE &&
            plane->type != DRM_PLANE_TYPE_CURSOR)
                drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
                                                   supported_rotations);

        drm_plane_helper_add(plane, &dm_plane_helper_funcs);

        /* Create (reset) the plane state */
        if (plane->funcs->reset)
                plane->funcs->reset(plane);

        return 0;
}

static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t crtc_index)
{
        struct amdgpu_crtc *acrtc = NULL;
        struct drm_plane *cursor_plane;

        int res = -ENOMEM;

        cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
        if (!cursor_plane)
                goto fail;

        cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
        res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
        if (res)
                goto fail;

        acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
        if (!acrtc)
                goto fail;

        res = drm_crtc_init_with_planes(
                        dm->ddev,
                        &acrtc->base,
                        plane,
                        cursor_plane,
                        &amdgpu_dm_crtc_funcs, NULL);

        if (res)
                goto fail;

        drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

        /* Create (reset) the crtc state */
        if (acrtc->base.funcs->reset)
                acrtc->base.funcs->reset(&acrtc->base);

        acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
        acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

        acrtc->crtc_id = crtc_index;
        acrtc->base.enabled = false;
        acrtc->otg_inst = -1;

        dm->adev->mode_info.crtcs[crtc_index] = acrtc;
        drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
                                   true, MAX_COLOR_LUT_ENTRIES);
        drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

        return 0;

fail:
        kfree(acrtc);
        kfree(cursor_plane);
        return res;
}

static int to_drm_connector_type(enum signal_type st)
{
        switch (st) {
        case SIGNAL_TYPE_HDMI_TYPE_A:
                return DRM_MODE_CONNECTOR_HDMIA;
        case SIGNAL_TYPE_EDP:
                return DRM_MODE_CONNECTOR_eDP;
        case SIGNAL_TYPE_LVDS:
                return DRM_MODE_CONNECTOR_LVDS;
        case SIGNAL_TYPE_RGB:
                return DRM_MODE_CONNECTOR_VGA;
        case SIGNAL_TYPE_DISPLAY_PORT:
        case SIGNAL_TYPE_DISPLAY_PORT_MST:
                return DRM_MODE_CONNECTOR_DisplayPort;
        case SIGNAL_TYPE_DVI_DUAL_LINK:
        case SIGNAL_TYPE_DVI_SINGLE_LINK:
                return DRM_MODE_CONNECTOR_DVID;
        case SIGNAL_TYPE_VIRTUAL:
                return DRM_MODE_CONNECTOR_VIRTUAL;
        default:
                return DRM_MODE_CONNECTOR_Unknown;
        }
}

static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
        struct drm_encoder *encoder;

        /* There is only one encoder per connector */
        drm_connector_for_each_possible_encoder(connector, encoder)
                return encoder;

        return NULL;
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
        struct drm_encoder *encoder;
        struct amdgpu_encoder *amdgpu_encoder;

        encoder = amdgpu_dm_connector_to_encoder(connector);

        if (encoder == NULL)
                return;

        amdgpu_encoder = to_amdgpu_encoder(encoder);

        amdgpu_encoder->native_mode.clock = 0;

        if (!list_empty(&connector->probed_modes)) {
                struct drm_display_mode *preferred_mode = NULL;

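                /*
                 * The probed-mode list was sorted by
                 * amdgpu_dm_connector_ddc_get_modes() with preferred modes
                 * first, so only the head of the list needs to be examined;
                 * hence the unconditional break below.
                 */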
                list_for_each_entry(preferred_mode,
                                    &connector->probed_modes,
                                    head) {
                        if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
                                amdgpu_encoder->native_mode = *preferred_mode;

                        break;
                }
        }
}

static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
                             char *name,
                             int hdisplay, int vdisplay)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct drm_display_mode *mode = NULL;
        struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

        mode = drm_mode_duplicate(dev, native_mode);

        if (mode == NULL)
                return NULL;

        mode->hdisplay = hdisplay;
        mode->vdisplay = vdisplay;
        mode->type &= ~DRM_MODE_TYPE_PREFERRED;
        strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

        return mode;
}

static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
                                                 struct drm_connector *connector)
{
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct drm_display_mode *mode = NULL;
        struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                                to_amdgpu_dm_connector(connector);
        int i;
        int n;
        struct mode_size {
                char name[DRM_DISPLAY_MODE_LEN];
                int w;
                int h;
        } common_modes[] = {
                {  "640x480",  640,  480},
                {  "800x600",  800,  600},
                { "1024x768", 1024,  768},
                { "1280x720", 1280,  720},
                { "1280x800", 1280,  800},
                {"1280x1024", 1280, 1024},
                { "1440x900", 1440,  900},
                {"1680x1050", 1680, 1050},
                {"1600x1200", 1600, 1200},
                {"1920x1080", 1920, 1080},
                {"1920x1200", 1920, 1200}
        };

        n = ARRAY_SIZE(common_modes);

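        /*
         * Add only common modes that fit within the native mode and are not
         * identical to it: larger modes cannot be scaled to the native
         * timing, and the native resolution is already in the probed list.
         */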
        for (i = 0; i < n; i++) {
                struct drm_display_mode *curmode = NULL;
                bool mode_existed = false;

                if (common_modes[i].w > native_mode->hdisplay ||
                    common_modes[i].h > native_mode->vdisplay ||
                    (common_modes[i].w == native_mode->hdisplay &&
                     common_modes[i].h == native_mode->vdisplay))
                        continue;

                list_for_each_entry(curmode, &connector->probed_modes, head) {
                        if (common_modes[i].w == curmode->hdisplay &&
                            common_modes[i].h == curmode->vdisplay) {
                                mode_existed = true;
                                break;
                        }
                }

                if (mode_existed)
                        continue;

                mode = amdgpu_dm_create_common_mode(encoder,
                                common_modes[i].name, common_modes[i].w,
                                common_modes[i].h);
                drm_mode_probed_add(connector, mode);
                amdgpu_dm_connector->num_modes++;
        }
}

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
                                              struct edid *edid)
{
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                        to_amdgpu_dm_connector(connector);

        if (edid) {
                /* empty probed_modes */
                INIT_LIST_HEAD(&connector->probed_modes);
                amdgpu_dm_connector->num_modes =
                                drm_add_edid_modes(connector, edid);

                /*
                 * Sort the probed modes before calling
                 * amdgpu_dm_get_native_mode(), since an EDID can contain more
                 * than one preferred mode. Modes later in the probed-mode list
                 * can have a higher, still-preferred resolution: for example,
                 * 3840x2160 in the base EDID preferred timing and 4096x2160 in
                 * a later DID extension block.
                 */
                drm_mode_sort(&connector->probed_modes);
                amdgpu_dm_get_native_mode(connector);

                /*
                 * Freesync capabilities are reset by calling
                 * drm_add_edid_modes() and need to be
                 * restored here.
                 */
                amdgpu_dm_update_freesync_caps(connector, edid);
        } else {
                amdgpu_dm_connector->num_modes = 0;
        }
}

static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
                              struct drm_display_mode *mode)
{
        struct drm_display_mode *m;

        list_for_each_entry(m, &aconnector->base.probed_modes, head) {
                if (drm_mode_equal(m, mode))
                        return true;
        }

        return false;
}

static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
{
        const struct drm_display_mode *m;
        struct drm_display_mode *new_mode;
        uint i;
        uint32_t new_modes_count = 0;

        /* Standard FPS values
         *
         * 23.976   - TV/NTSC
         * 24       - Cinema
         * 25       - TV/PAL
         * 29.97    - TV/NTSC
         * 30       - TV/NTSC
         * 48       - Cinema HFR
         * 50       - TV/PAL
         * 60       - Commonly used
         * 48,72,96 - Multiples of 24
         */
        const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
                                          48000, 50000, 60000, 72000, 96000 };

        /*
         * Find the mode with the highest refresh rate at the same resolution
         * as the preferred mode. Some monitors report a preferred mode with a
         * lower refresh rate than the highest one they support.
         */

        m = get_highest_refresh_rate_mode(aconnector, true);
        if (!m)
                return 0;

        for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
                uint64_t target_vtotal, target_vtotal_diff;
                uint64_t num, den;

                if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
                        continue;

                if (common_rates[i] < aconnector->min_vfreq * 1000 ||
                    common_rates[i] > aconnector->max_vfreq * 1000)
                        continue;

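                /*
                 * vrefresh(Hz) = clock(kHz) * 1000 / (htotal * vtotal) and
                 * common_rates[] is in millihertz, so the vtotal that hits the
                 * target rate is clock * 1000 * 1000 / (rate * htotal).
                 */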
                num = (unsigned long long)m->clock * 1000 * 1000;
                den = common_rates[i] * (unsigned long long)m->htotal;
                target_vtotal = div_u64(num, den);
                target_vtotal_diff = target_vtotal - m->vtotal;

                /* Check for illegal modes */
                if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
                    m->vsync_end + target_vtotal_diff < m->vsync_start ||
                    m->vtotal + target_vtotal_diff < m->vsync_end)
                        continue;

                new_mode = drm_mode_duplicate(aconnector->base.dev, m);
                if (!new_mode)
                        goto out;

                new_mode->vtotal += (u16)target_vtotal_diff;
                new_mode->vsync_start += (u16)target_vtotal_diff;
                new_mode->vsync_end += (u16)target_vtotal_diff;
                new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
                new_mode->type |= DRM_MODE_TYPE_DRIVER;

                if (!is_duplicate_mode(aconnector, new_mode)) {
                        drm_mode_probed_add(&aconnector->base, new_mode);
                        new_modes_count += 1;
                } else {
                        drm_mode_destroy(aconnector->base.dev, new_mode);
                }
        }
out:
        return new_modes_count;
}

static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
                                                   struct edid *edid)
{
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);

        if (!(amdgpu_freesync_vid_mode && edid))
                return;

        if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
                amdgpu_dm_connector->num_modes +=
                        add_fs_modes(amdgpu_dm_connector);
}

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                        to_amdgpu_dm_connector(connector);
        struct drm_encoder *encoder;
        struct edid *edid = amdgpu_dm_connector->edid;

        encoder = amdgpu_dm_connector_to_encoder(connector);

        if (!drm_edid_is_valid(edid)) {
                amdgpu_dm_connector->num_modes =
                                drm_add_modes_noedid(connector, 640, 480);
        } else {
                amdgpu_dm_connector_ddc_get_modes(connector, edid);
                amdgpu_dm_connector_add_common_modes(encoder, connector);
                amdgpu_dm_connector_add_freesync_modes(connector, edid);
        }
        amdgpu_dm_fbc_init(connector);

        return amdgpu_dm_connector->num_modes;
}

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
                                     struct amdgpu_dm_connector *aconnector,
                                     int connector_type,
                                     struct dc_link *link,
                                     int link_index)
{
        struct amdgpu_device *adev = drm_to_adev(dm->ddev);

        /*
         * Some of the properties below require access to state, like bpc.
         * Allocate some default initial connector state with our reset helper.
         */
        if (aconnector->base.funcs->reset)
                aconnector->base.funcs->reset(&aconnector->base);

        aconnector->connector_id = link_index;
        aconnector->dc_link = link;
        aconnector->base.interlace_allowed = false;
        aconnector->base.doublescan_allowed = false;
        aconnector->base.stereo_allowed = false;
        aconnector->base.dpms = DRM_MODE_DPMS_OFF;
        aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
        aconnector->audio_inst = -1;
        mutex_init(&aconnector->hpd_lock);

        /*
         * Configure HPD hot-plug support: connector->polled defaults to 0,
         * which means HPD hot plug is not supported.
         */
        switch (connector_type) {
        case DRM_MODE_CONNECTOR_HDMIA:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
                aconnector->base.ycbcr_420_allowed =
                        link->link_enc->features.hdmi_ycbcr420_supported;
                break;
        case DRM_MODE_CONNECTOR_DisplayPort:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
                aconnector->base.ycbcr_420_allowed =
                        link->link_enc->features.dp_ycbcr420_supported;
                break;
        case DRM_MODE_CONNECTOR_DVID:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
                break;
        default:
                break;
        }

        drm_object_attach_property(&aconnector->base.base,
                                dm->ddev->mode_config.scaling_mode_property,
                                DRM_MODE_SCALE_NONE);

        drm_object_attach_property(&aconnector->base.base,
                                adev->mode_info.underscan_property,
                                UNDERSCAN_OFF);
        drm_object_attach_property(&aconnector->base.base,
                                adev->mode_info.underscan_hborder_property,
                                0);
        drm_object_attach_property(&aconnector->base.base,
                                adev->mode_info.underscan_vborder_property,
                                0);

        if (!aconnector->mst_port)
                drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

        /* This defaults to the max in the range, but we want 8bpc for non-eDP. */
        aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
        aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

        if (connector_type == DRM_MODE_CONNECTOR_eDP &&
            (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
                drm_object_attach_property(&aconnector->base.base,
                                adev->mode_info.abm_level_property, 0);
        }

        if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
            connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
            connector_type == DRM_MODE_CONNECTOR_eDP) {
                drm_connector_attach_hdr_output_metadata_property(&aconnector->base);

                if (!aconnector->mst_port)
                        drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
                if (adev->dm.hdcp_workqueue)
                        drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
        }
}

static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
                              struct i2c_msg *msgs, int num)
{
        struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
        struct ddc_service *ddc_service = i2c->ddc_service;
        struct i2c_command cmd;
        int i;
        int result = -EIO;

        cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

        if (!cmd.payloads)
                return result;

        cmd.number_of_payloads = num;
        cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
        cmd.speed = 100;

        for (i = 0; i < num; i++) {
                cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
                cmd.payloads[i].address = msgs[i].addr;
                cmd.payloads[i].length = msgs[i].len;
                cmd.payloads[i].data = msgs[i].buf;
        }

        if (dc_submit_i2c(
                        ddc_service->ctx->dc,
                        ddc_service->ddc_pin->hw_info.ddc_channel,
                        &cmd))
                result = num;

        kfree(cmd.payloads);
        return result;
}

static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
        .master_xfer = amdgpu_dm_i2c_xfer,
        .functionality = amdgpu_dm_i2c_func,
};

static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
           int link_index,
           int *res)
{
        struct amdgpu_device *adev = ddc_service->ctx->driver_context;
        struct amdgpu_i2c_adapter *i2c;

        i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
        if (!i2c)
                return NULL;
        i2c->base.owner = THIS_MODULE;
        i2c->base.class = I2C_CLASS_DDC;
        i2c->base.dev.parent = &adev->pdev->dev;
        i2c->base.algo = &amdgpu_dm_i2c_algo;
        snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
        i2c_set_adapdata(&i2c->base, i2c);
        i2c->ddc_service = ddc_service;
        i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

        return i2c;
}

/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *aconnector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *aencoder)
{
        int res = 0;
        int connector_type;
        struct dc *dc = dm->dc;
        struct dc_link *link = dc_get_link_at_index(dc, link_index);
        struct amdgpu_i2c_adapter *i2c;

        link->priv = aconnector;

        DRM_DEBUG_DRIVER("%s()\n", __func__);

        i2c = create_i2c(link->ddc, link->link_index, &res);
        if (!i2c) {
                DRM_ERROR("Failed to create i2c adapter data\n");
                return -ENOMEM;
        }

        aconnector->i2c = i2c;
        res = i2c_add_adapter(&i2c->base);

        if (res) {
                DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
                goto out_free;
        }

        connector_type = to_drm_connector_type(link->connector_signal);

        res = drm_connector_init_with_ddc(
                        dm->ddev,
                        &aconnector->base,
                        &amdgpu_dm_connector_funcs,
                        connector_type,
                        &i2c->base);

        if (res) {
                DRM_ERROR("connector_init failed\n");
                aconnector->connector_id = -1;
                goto out_free;
        }

        drm_connector_helper_add(
                        &aconnector->base,
                        &amdgpu_dm_connector_helper_funcs);

        amdgpu_dm_connector_init_helper(
                dm,
                aconnector,
                connector_type,
                link,
                link_index);

        drm_connector_attach_encoder(
                &aconnector->base, &aencoder->base);

        if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
                || connector_type == DRM_MODE_CONNECTOR_eDP)
                amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
        if (res) {
                kfree(i2c);
                aconnector->i2c = NULL;
        }
        return res;
}

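/*
 * Possible-CRTC mask for the encoders: one bit per CRTC, equivalent to
 * (1 << num_crtc) - 1 capped at the 6 CRTCs the display hardware exposes.
 */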
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
        switch (adev->mode_info.num_crtc) {
        case 1:
                return 0x1;
        case 2:
                return 0x3;
        case 3:
                return 0x7;
        case 4:
                return 0xf;
        case 5:
                return 0x1f;
        case 6:
        default:
                return 0x3f;
        }
}

static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        int res = drm_encoder_init(dev,
                                   &aencoder->base,
                                   &amdgpu_dm_encoder_funcs,
                                   DRM_MODE_ENCODER_TMDS,
                                   NULL);

        aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

        if (!res)
                aencoder->encoder_id = link_index;
        else
                aencoder->encoder_id = -1;

        drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

        return res;
}

static void manage_dm_interrupts(struct amdgpu_device *adev,
                                 struct amdgpu_crtc *acrtc,
                                 bool enable)
{
        /*
         * We have no guarantee that the frontend index maps to the same
         * backend index - some even map to more than one.
         *
         * TODO: Use a different interrupt or check DC itself for the mapping.
         */
        int irq_type =
                amdgpu_display_crtc_idx_to_irq_type(
                        adev,
                        acrtc->crtc_id);

        if (enable) {
                drm_crtc_vblank_on(&acrtc->base);
                amdgpu_irq_get(
                        adev,
                        &adev->pageflip_irq,
                        irq_type);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
                amdgpu_irq_get(
                        adev,
                        &adev->vline0_irq,
                        irq_type);
#endif
        } else {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
                amdgpu_irq_put(
                        adev,
                        &adev->vline0_irq,
                        irq_type);
#endif
                amdgpu_irq_put(
                        adev,
                        &adev->pageflip_irq,
                        irq_type);
                drm_crtc_vblank_off(&acrtc->base);
        }
}

static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
                                      struct amdgpu_crtc *acrtc)
{
        int irq_type =
                amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

        /*
         * Read the current IRQ state and force-reapply the setting to the
         * hardware.
         */
        amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}

static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
                           const struct dm_connector_state *old_dm_state)
{
        if (dm_state->scaling != old_dm_state->scaling)
                return true;
        if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
                if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
                        return true;
        } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
                if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
                        return true;
        } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
                   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
                return true;
        return false;
}

#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
                                            const struct drm_connector_state *old_state,
                                            const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

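        /*
         * Transition summary (old content_protection -> new):
         *   Type0/1 change while not UNDESIRED -> force DESIRED (re-enable)
         *   ENABLED   -> DESIRED               -> keep ENABLED, no change
         *   UNDESIRED -> ENABLED (S3 resume)   -> treated as DESIRED
         *   DESIRED   -> DESIRED               -> change only if update_hdcp
         *   old == new                         -> no change
         *   anything  -> UNDESIRED/DESIRED     -> change
         *   DESIRED   -> ENABLED               -> no change (driver-initiated)
         */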
        /* Handle: Type0/1 change */
        if (old_state->hdcp_content_type != state->hdcp_content_type &&
            state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
                state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
                return true;
        }

        /* CP is being re-enabled, ignore this.
         *
         * Handles:     ENABLED -> DESIRED
         */
        if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
            state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
                state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
                return false;
        }

        /* S3 resume case: the old state is always 0 (UNDESIRED) while the
         * restored state will be ENABLED.
         *
         * Handles:     UNDESIRED -> ENABLED
         */
        if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
            state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
                state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

        /* Check that something is connected and enabled; otherwise we would
         * start HDCP with nothing connected or enabled (hot plug, headless
         * S3, DPMS).
         *
         * Handles:     DESIRED -> DESIRED (Special case)
         */
        if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
            connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
                dm_con_state->update_hdcp = false;
                return true;
        }

        /*
         * Handles:     UNDESIRED -> UNDESIRED
         *              DESIRED -> DESIRED
         *              ENABLED -> ENABLED
         */
        if (old_state->content_protection == state->content_protection)
                return false;

        /*
         * Handles:     UNDESIRED -> DESIRED
         *              DESIRED -> UNDESIRED
         *              ENABLED -> UNDESIRED
         */
        if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
                return true;

        /*
         * Handles:     DESIRED -> ENABLED
         */
        return false;
}

#endif

static void remove_stream(struct amdgpu_device *adev,
                          struct amdgpu_crtc *acrtc,
                          struct dc_stream_state *stream)
{
        /* this is the update mode case */

        acrtc->otg_inst = -1;
        acrtc->enabled = false;
}

static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
                               struct dc_cursor_position *position)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        int x, y;
        int xorigin = 0, yorigin = 0;

        if (!crtc || !plane->state->fb)
                return 0;

        if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
            (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
                DRM_ERROR("%s: bad cursor width or height %d x %d\n",
                          __func__,
                          plane->state->crtc_w,
                          plane->state->crtc_h);
                return -EINVAL;
        }

        x = plane->state->crtc_x;
        y = plane->state->crtc_y;

        if (x <= -amdgpu_crtc->max_cursor_width ||
            y <= -amdgpu_crtc->max_cursor_height)
                return 0;

        if (x < 0) {
                xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
                x = 0;
        }
        if (y < 0) {
                yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
                y = 0;
        }
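        /*
         * Illustrative example: crtc_x = -10 clamps x to 0 and sets the
         * hotspot 10 pixels into the cursor image, making the cursor appear
         * to slide off the left edge of the screen.
         */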
        position->enable = true;
        position->translate_by_source = true;
        position->x = x;
        position->y = y;
        position->x_hotspot = xorigin;
        position->y_hotspot = yorigin;

        return 0;
}

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state)
{
        struct amdgpu_device *adev = drm_to_adev(plane->dev);
        struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
        struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
        struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        uint64_t address = afb ? afb->address : 0;
        struct dc_cursor_position position = {0};
        struct dc_cursor_attributes attributes;
        int ret;

        if (!plane->state->fb && !old_plane_state->fb)
                return;

        DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
                      __func__,
                      amdgpu_crtc->crtc_id,
                      plane->state->crtc_w,
                      plane->state->crtc_h);

        ret = get_cursor_position(plane, crtc, &position);
        if (ret)
                return;

        if (!position.enable) {
                /* turn off cursor */
                if (crtc_state && crtc_state->stream) {
                        mutex_lock(&adev->dm.dc_lock);
                        dc_stream_set_cursor_position(crtc_state->stream,
                                                      &position);
                        mutex_unlock(&adev->dm.dc_lock);
                }
                return;
        }

        amdgpu_crtc->cursor_width = plane->state->crtc_w;
        amdgpu_crtc->cursor_height = plane->state->crtc_h;

        memset(&attributes, 0, sizeof(attributes));
        attributes.address.high_part = upper_32_bits(address);
        attributes.address.low_part  = lower_32_bits(address);
        attributes.width             = plane->state->crtc_w;
        attributes.height            = plane->state->crtc_h;
        attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
        attributes.rotation_angle    = 0;
        attributes.attribute_flags.value = 0;

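        /* The cursor pitch is in pixels, hence bytes / bytes-per-pixel. */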
        attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

        if (crtc_state->stream) {
                mutex_lock(&adev->dm.dc_lock);
                if (!dc_stream_set_cursor_attributes(crtc_state->stream,
                                                     &attributes))
                        DRM_ERROR("DC failed to set cursor attributes\n");

                if (!dc_stream_set_cursor_position(crtc_state->stream,
                                                   &position))
                        DRM_ERROR("DC failed to set cursor position\n");
                mutex_unlock(&adev->dm.dc_lock);
        }
}

static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{
        assert_spin_locked(&acrtc->base.dev->event_lock);
        WARN_ON(acrtc->event);

        acrtc->event = acrtc->base.state->event;

        /* Set the flip status */
        acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

        /* Mark this event as consumed */
        acrtc->base.state->event = NULL;

        DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
                     acrtc->crtc_id);
}

8107 static void update_freesync_state_on_stream(
8108         struct amdgpu_display_manager *dm,
8109         struct dm_crtc_state *new_crtc_state,
8110         struct dc_stream_state *new_stream,
8111         struct dc_plane_state *surface,
8112         u32 flip_timestamp_in_us)
8113 {
8114         struct mod_vrr_params vrr_params;
8115         struct dc_info_packet vrr_infopacket = {0};
8116         struct amdgpu_device *adev = dm->adev;
8117         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8118         unsigned long flags;
8119         bool pack_sdp_v1_3 = false;
8120
8121         if (!new_stream)
8122                 return;
8123
8124         /*
8125          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8126          * For now it's sufficient to just guard against these conditions.
8127          */
8128
8129         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8130                 return;
8131
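              /* vrr_params is shared with the vblank/vupdate interrupt
               * handlers via dm_irq_params, so snapshot it and write it
               * back under the event lock.
               */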
8132         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8133         vrr_params = acrtc->dm_irq_params.vrr_params;
8134
8135         if (surface) {
8136                 mod_freesync_handle_preflip(
8137                         dm->freesync_module,
8138                         surface,
8139                         new_stream,
8140                         flip_timestamp_in_us,
8141                         &vrr_params);
8142
8143                 if (adev->family < AMDGPU_FAMILY_AI &&
8144                     amdgpu_dm_vrr_active(new_crtc_state)) {
8145                         mod_freesync_handle_v_update(dm->freesync_module,
8146                                                      new_stream, &vrr_params);
8147
8148                         /* Need to call this before the frame ends. */
8149                         dc_stream_adjust_vmin_vmax(dm->dc,
8150                                                    new_crtc_state->stream,
8151                                                    &vrr_params.adjust);
8152                 }
8153         }
8154
8155         mod_freesync_build_vrr_infopacket(
8156                 dm->freesync_module,
8157                 new_stream,
8158                 &vrr_params,
8159                 PACKET_TYPE_VRR,
8160                 TRANSFER_FUNC_UNKNOWN,
8161                 &vrr_infopacket,
8162                 pack_sdp_v1_3);
8163
8164         new_crtc_state->freesync_timing_changed |=
8165                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8166                         &vrr_params.adjust,
8167                         sizeof(vrr_params.adjust)) != 0);
8168
8169         new_crtc_state->freesync_vrr_info_changed |=
8170                 (memcmp(&new_crtc_state->vrr_infopacket,
8171                         &vrr_infopacket,
8172                         sizeof(vrr_infopacket)) != 0);
8173
8174         acrtc->dm_irq_params.vrr_params = vrr_params;
8175         new_crtc_state->vrr_infopacket = vrr_infopacket;
8176
8177         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8178         new_stream->vrr_infopacket = vrr_infopacket;
8179
8180         if (new_crtc_state->freesync_vrr_info_changed)
8181                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
8182                               new_crtc_state->base.crtc->base.id,
8183                               (int)new_crtc_state->base.vrr_enabled,
8184                               (int)vrr_params.state);
8185
8186         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8187 }
8188
8189 static void update_stream_irq_parameters(
8190         struct amdgpu_display_manager *dm,
8191         struct dm_crtc_state *new_crtc_state)
8192 {
8193         struct dc_stream_state *new_stream = new_crtc_state->stream;
8194         struct mod_vrr_params vrr_params;
8195         struct mod_freesync_config config = new_crtc_state->freesync_config;
8196         struct amdgpu_device *adev = dm->adev;
8197         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8198         unsigned long flags;
8199
8200         if (!new_stream)
8201                 return;
8202
8203         /*
8204          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8205          * For now it's sufficient to just guard against these conditions.
8206          */
8207         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8208                 return;
8209
8210         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8211         vrr_params = acrtc->dm_irq_params.vrr_params;
8212
8213         if (new_crtc_state->vrr_supported &&
8214             config.min_refresh_in_uhz &&
8215             config.max_refresh_in_uhz) {
8216                 /*
8217                  * if freesync compatible mode was set, config.state will be set
8218                  * in atomic check
8219                  */
8220                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8221                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8222                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8223                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8224                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8225                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8226                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8227                 } else {
8228                         config.state = new_crtc_state->base.vrr_enabled ?
8229                                                      VRR_STATE_ACTIVE_VARIABLE :
8230                                                      VRR_STATE_INACTIVE;
8231                 }
8232         } else {
8233                 config.state = VRR_STATE_UNSUPPORTED;
8234         }
8235
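              /* Build the vmin/vmax adjustment for the chosen VRR state and
               * refresh range before publishing it to the IRQ parameters.
               */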
8236         mod_freesync_build_vrr_params(dm->freesync_module,
8237                                       new_stream,
8238                                       &config, &vrr_params);
8239
8240         new_crtc_state->freesync_timing_changed |=
8241                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8242                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8243
8244         new_crtc_state->freesync_config = config;
8245         /* Copy state for access from DM IRQ handler */
8246         acrtc->dm_irq_params.freesync_config = config;
8247         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8248         acrtc->dm_irq_params.vrr_params = vrr_params;
8249         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8250 }
8251
8252 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8253                                             struct dm_crtc_state *new_state)
8254 {
8255         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8256         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8257
8258         if (!old_vrr_active && new_vrr_active) {
8259                 /* Transition VRR inactive -> active:
8260                  * While VRR is active, we must not disable vblank irq, as a
8261                  * reenable after disable would compute bogus vblank/pflip
8262                  * timestamps if it likely happened inside display front-porch.
8263                  *
8264                  * We also need vupdate irq for the actual core vblank handling
8265                  * at end of vblank.
8266                  */
8267                 dm_set_vupdate_irq(new_state->base.crtc, true);
8268                 drm_crtc_vblank_get(new_state->base.crtc);
8269                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8270                                  __func__, new_state->base.crtc->base.id);
8271         } else if (old_vrr_active && !new_vrr_active) {
8272                 /* Transition VRR active -> inactive:
8273                  * Allow vblank irq disable again for fixed refresh rate.
8274                  */
8275                 dm_set_vupdate_irq(new_state->base.crtc, false);
8276                 drm_crtc_vblank_put(new_state->base.crtc);
8277                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8278                                  __func__, new_state->base.crtc->base.id);
8279         }
8280 }
8281
8282 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8283 {
8284         struct drm_plane *plane;
8285         struct drm_plane_state *old_plane_state;
8286         int i;
8287
8288         /*
8289          * TODO: Make this per-stream so we don't issue redundant updates for
8290          * commits with multiple streams.
8291          */
8292         for_each_old_plane_in_state(state, plane, old_plane_state, i)
8293                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8294                         handle_cursor_update(plane, old_plane_state);
8295 }
8296
8297 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8298                                     struct dc_state *dc_state,
8299                                     struct drm_device *dev,
8300                                     struct amdgpu_display_manager *dm,
8301                                     struct drm_crtc *pcrtc,
8302                                     bool wait_for_vblank)
8303 {
8304         uint32_t i;
8305         uint64_t timestamp_ns;
8306         struct drm_plane *plane;
8307         struct drm_plane_state *old_plane_state, *new_plane_state;
8308         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8309         struct drm_crtc_state *new_pcrtc_state =
8310                         drm_atomic_get_new_crtc_state(state, pcrtc);
8311         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8312         struct dm_crtc_state *dm_old_crtc_state =
8313                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8314         int planes_count = 0, vpos, hpos;
8315         long r;
8316         unsigned long flags;
8317         struct amdgpu_bo *abo;
8318         uint32_t target_vblank, last_flip_vblank;
8319         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8320         bool pflip_present = false;
8321         struct {
8322                 struct dc_surface_update surface_updates[MAX_SURFACES];
8323                 struct dc_plane_info plane_infos[MAX_SURFACES];
8324                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8325                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8326                 struct dc_stream_update stream_update;
8327         } *bundle;
8328
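              /* The bundle carries MAX_SURFACES worth of update structures,
               * too large to sit comfortably on the kernel stack, so it is
               * heap-allocated for the duration of the commit.
               */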
8329         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8330
8331         if (!bundle) {
8332                 dm_error("Failed to allocate update bundle\n");
8333                 goto cleanup;
8334         }
8335
8336         /*
8337          * Disable the cursor first if we're disabling all the planes.
8338          * It'll remain on the screen after the planes are re-enabled
8339          * if we don't.
8340          */
8341         if (acrtc_state->active_planes == 0)
8342                 amdgpu_dm_commit_cursors(state);
8343
8344         /* update planes when needed */
8345         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8346                 struct drm_crtc *crtc = new_plane_state->crtc;
8347                 struct drm_crtc_state *new_crtc_state;
8348                 struct drm_framebuffer *fb = new_plane_state->fb;
8349                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8350                 bool plane_needs_flip;
8351                 struct dc_plane_state *dc_plane;
8352                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8353
8354                 /* Cursor plane is handled after stream updates */
8355                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8356                         continue;
8357
8358                 if (!fb || !crtc || pcrtc != crtc)
8359                         continue;
8360
8361                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8362                 if (!new_crtc_state->active)
8363                         continue;
8364
8365                 dc_plane = dm_new_plane_state->dc_state;
8366
8367                 bundle->surface_updates[planes_count].surface = dc_plane;
8368                 if (new_pcrtc_state->color_mgmt_changed) {
8369                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8370                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8371                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8372                 }
8373
8374                 fill_dc_scaling_info(new_plane_state,
8375                                      &bundle->scaling_infos[planes_count]);
8376
8377                 bundle->surface_updates[planes_count].scaling_info =
8378                         &bundle->scaling_infos[planes_count];
8379
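                      /* Only a plane that has a framebuffer both before and
                       * after the commit needs a page flip; enables and
                       * disables are handled as plain surface updates.
                       */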
8380                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8381
8382                 pflip_present = pflip_present || plane_needs_flip;
8383
8384                 if (!plane_needs_flip) {
8385                         planes_count += 1;
8386                         continue;
8387                 }
8388
8389                 abo = gem_to_amdgpu_bo(fb->obj[0]);
8390
8391                 /*
8392                  * Wait for all fences on this FB. Do limited wait to avoid
8393                  * deadlock during GPU reset when this fence will not signal
8394                  * but we hold reservation lock for the BO.
8395                  */
8396                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8397                                                         false,
8398                                                         msecs_to_jiffies(5000));
8399                 if (unlikely(r <= 0))
8400                         DRM_ERROR("Waiting for fences timed out!\n");
8401
8402                 fill_dc_plane_info_and_addr(
8403                         dm->adev, new_plane_state,
8404                         afb->tiling_flags,
8405                         &bundle->plane_infos[planes_count],
8406                         &bundle->flip_addrs[planes_count].address,
8407                         afb->tmz_surface, false);
8408
8409                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8410                                  new_plane_state->plane->index,
8411                                  bundle->plane_infos[planes_count].dcc.enable);
8412
8413                 bundle->surface_updates[planes_count].plane_info =
8414                         &bundle->plane_infos[planes_count];
8415
8416                 /*
8417                  * Only allow immediate flips for fast updates that don't
8418                  * change FB pitch, DCC state, rotation or mirroring.
8419                  */
8420                 bundle->flip_addrs[planes_count].flip_immediate =
8421                         crtc->state->async_flip &&
8422                         acrtc_state->update_type == UPDATE_TYPE_FAST;
8423
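                      /* Timestamp the flip in microseconds; this feeds the
                       * freesync preflip handling in
                       * update_freesync_state_on_stream() below.
                       */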
8424                 timestamp_ns = ktime_get_ns();
8425                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8426                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8427                 bundle->surface_updates[planes_count].surface = dc_plane;
8428
8429                 if (!bundle->surface_updates[planes_count].surface) {
8430                         DRM_ERROR("No surface for CRTC: id=%d\n",
8431                                         acrtc_attach->crtc_id);
8432                         continue;
8433                 }
8434
8435                 if (plane == pcrtc->primary)
8436                         update_freesync_state_on_stream(
8437                                 dm,
8438                                 acrtc_state,
8439                                 acrtc_state->stream,
8440                                 dc_plane,
8441                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8442
8443                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8444                                  __func__,
8445                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8446                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8447
8448                 planes_count += 1;
8450         }
8451
8452         if (pflip_present) {
8453                 if (!vrr_active) {
8454                         /* Use old throttling in non-vrr fixed refresh rate mode
8455                          * to keep flip scheduling based on target vblank counts
8456                          * working in a backwards compatible way, e.g., for
8457                          * clients using the GLX_OML_sync_control extension or
8458                          * DRI3/Present extension with defined target_msc.
8459                          */
8460                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8461                 } else {
8463                         /* For variable refresh rate mode only:
8464                          * Get vblank of last completed flip to avoid > 1 vrr
8465                          * flips per video frame by use of throttling, but allow
8466                          * flip programming anywhere in the possibly large
8467                          * variable vrr vblank interval for fine-grained flip
8468                          * timing control and more opportunity to avoid stutter
8469                          * on late submission of flips.
8470                          */
8471                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8472                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8473                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8474                 }
8475
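                      /* wait_for_vblank is 0 or 1, so this either targets the
                       * vblank following the last completed flip (throttled)
                       * or permits the flip within the current interval.
                       */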
8476                 target_vblank = last_flip_vblank + wait_for_vblank;
8477
8478                 /*
8479                  * Wait until we're out of the vertical blank period before the one
8480                  * targeted by the flip
8481                  */
8482                 while ((acrtc_attach->enabled &&
8483                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8484                                                             0, &vpos, &hpos, NULL,
8485                                                             NULL, &pcrtc->hwmode)
8486                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8487                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8488                         (int)(target_vblank -
8489                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8490                         usleep_range(1000, 1100);
8491                 }
8492
8493                 /**
8494                  * Prepare the flip event for the pageflip interrupt to handle.
8495                  *
8496                  * This only works in the case where we've already turned on the
8497                  * appropriate hardware blocks (eg. HUBP) so in the transition case
8498                  * from 0 -> n planes we have to skip a hardware generated event
8499                  * and rely on sending it from software.
8500                  */
8501                 if (acrtc_attach->base.state->event &&
8502                     acrtc_state->active_planes > 0) {
8503                         drm_crtc_vblank_get(pcrtc);
8504
8505                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8506
8507                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8508                         prepare_flip_isr(acrtc_attach);
8509
8510                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8511                 }
8512
8513                 if (acrtc_state->stream) {
8514                         if (acrtc_state->freesync_vrr_info_changed)
8515                                 bundle->stream_update.vrr_infopacket =
8516                                         &acrtc_state->stream->vrr_infopacket;
8517                 }
8518         }
8519
8520         /* Update the planes if changed or disable if we don't have any. */
8521         if ((planes_count || acrtc_state->active_planes == 0) &&
8522                 acrtc_state->stream) {
8523                 bundle->stream_update.stream = acrtc_state->stream;
8524                 if (new_pcrtc_state->mode_changed) {
8525                         bundle->stream_update.src = acrtc_state->stream->src;
8526                         bundle->stream_update.dst = acrtc_state->stream->dst;
8527                 }
8528
8529                 if (new_pcrtc_state->color_mgmt_changed) {
8530                         /*
8531                          * TODO: This isn't fully correct since we've actually
8532                          * already modified the stream in place.
8533                          */
8534                         bundle->stream_update.gamut_remap =
8535                                 &acrtc_state->stream->gamut_remap_matrix;
8536                         bundle->stream_update.output_csc_transform =
8537                                 &acrtc_state->stream->csc_color_matrix;
8538                         bundle->stream_update.out_transfer_func =
8539                                 acrtc_state->stream->out_transfer_func;
8540                 }
8541
8542                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8543                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8544                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
8545
8546                 /*
8547                  * If FreeSync state on the stream has changed then we need to
8548                  * re-adjust the min/max bounds now that DC doesn't handle this
8549                  * as part of commit.
8550                  */
8551                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8552                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8553                         dc_stream_adjust_vmin_vmax(
8554                                 dm->dc, acrtc_state->stream,
8555                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8556                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8557                 }
8558                 mutex_lock(&dm->dc_lock);
8559                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8560                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
8561                         amdgpu_dm_psr_disable(acrtc_state->stream);
8562
8563                 dc_commit_updates_for_stream(dm->dc,
8564                                                      bundle->surface_updates,
8565                                                      planes_count,
8566                                                      acrtc_state->stream,
8567                                                      &bundle->stream_update,
8568                                                      dc_state);
8569
8570                 /**
8571                  * Enable or disable the interrupts on the backend.
8572                  *
8573                  * Most pipes are put into power gating when unused.
8574                  *
8575                  * When power gating is enabled on a pipe we lose the
8576                  * interrupt enablement state when power gating is disabled.
8577                  *
8578                  * So we need to update the IRQ control state in hardware
8579                  * whenever the pipe turns on (since it could be previously
8580                  * power gated) or off (since some pipes can't be power gated
8581                  * on some ASICs).
8582                  */
8583                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8584                         dm_update_pflip_irq_state(drm_to_adev(dev),
8585                                                   acrtc_attach);
8586
8587                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8588                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8589                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8590                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8591                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8592                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8593                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8594                         amdgpu_dm_psr_enable(acrtc_state->stream);
8595                 }
8596
8597                 mutex_unlock(&dm->dc_lock);
8598         }
8599
8600         /*
8601          * Update cursor state *after* programming all the planes.
8602          * This avoids redundant programming in the case where we're going
8603          * to be disabling a single plane - those pipes are being disabled.
8604          */
8605         if (acrtc_state->active_planes)
8606                 amdgpu_dm_commit_cursors(state);
8607
8608 cleanup:
8609         kfree(bundle);
8610 }
8611
8612 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8613                                    struct drm_atomic_state *state)
8614 {
8615         struct amdgpu_device *adev = drm_to_adev(dev);
8616         struct amdgpu_dm_connector *aconnector;
8617         struct drm_connector *connector;
8618         struct drm_connector_state *old_con_state, *new_con_state;
8619         struct drm_crtc_state *new_crtc_state;
8620         struct dm_crtc_state *new_dm_crtc_state;
8621         const struct dc_stream_status *status;
8622         int i, inst;
8623
8624         /* Notify device removals. */
8625         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8626                 if (old_con_state->crtc != new_con_state->crtc) {
8627                         /* CRTC changes require notification. */
8628                         goto notify;
8629                 }
8630
8631                 if (!new_con_state->crtc)
8632                         continue;
8633
8634                 new_crtc_state = drm_atomic_get_new_crtc_state(
8635                         state, new_con_state->crtc);
8636
8637                 if (!new_crtc_state)
8638                         continue;
8639
8640                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8641                         continue;
8642
8643         notify:
8644                 aconnector = to_amdgpu_dm_connector(connector);
8645
8646                 mutex_lock(&adev->dm.audio_lock);
8647                 inst = aconnector->audio_inst;
8648                 aconnector->audio_inst = -1;
8649                 mutex_unlock(&adev->dm.audio_lock);
8650
8651                 amdgpu_dm_audio_eld_notify(adev, inst);
8652         }
8653
8654         /* Notify audio device additions. */
8655         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8656                 if (!new_con_state->crtc)
8657                         continue;
8658
8659                 new_crtc_state = drm_atomic_get_new_crtc_state(
8660                         state, new_con_state->crtc);
8661
8662                 if (!new_crtc_state)
8663                         continue;
8664
8665                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8666                         continue;
8667
8668                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8669                 if (!new_dm_crtc_state->stream)
8670                         continue;
8671
8672                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8673                 if (!status)
8674                         continue;
8675
8676                 aconnector = to_amdgpu_dm_connector(connector);
8677
8678                 mutex_lock(&adev->dm.audio_lock);
8679                 inst = status->audio_inst;
8680                 aconnector->audio_inst = inst;
8681                 mutex_unlock(&adev->dm.audio_lock);
8682
8683                 amdgpu_dm_audio_eld_notify(adev, inst);
8684         }
8685 }
8686
8687 /*
8688  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8689  * @crtc_state: the DRM CRTC state
8690  * @stream_state: the DC stream state.
8691  *
8692  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8693  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8694  */
8695 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8696                                                 struct dc_stream_state *stream_state)
8697 {
8698         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8699 }
8700
8701 /**
8702  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8703  * @state: The atomic state to commit
8704  *
8705  * This will tell DC to commit the constructed DC state from atomic_check,
8706  * programming the hardware. Any failure here implies a hardware failure, since
8707  * atomic check should have filtered anything non-kosher.
8708  */
8709 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8710 {
8711         struct drm_device *dev = state->dev;
8712         struct amdgpu_device *adev = drm_to_adev(dev);
8713         struct amdgpu_display_manager *dm = &adev->dm;
8714         struct dm_atomic_state *dm_state;
8715         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8716         uint32_t i, j;
8717         struct drm_crtc *crtc;
8718         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8719         unsigned long flags;
8720         bool wait_for_vblank = true;
8721         struct drm_connector *connector;
8722         struct drm_connector_state *old_con_state, *new_con_state;
8723         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8724         int crtc_disable_count = 0;
8725         bool mode_set_reset_required = false;
8726
8727         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8728
8729         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8730
8731         dm_state = dm_atomic_get_new_state(state);
8732         if (dm_state && dm_state->context) {
8733                 dc_state = dm_state->context;
8734         } else {
8735                 /* No state changes, retain current state. */
8736                 dc_state_temp = dc_create_state(dm->dc);
8737                 ASSERT(dc_state_temp);
8738                 dc_state = dc_state_temp;
8739                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8740         }
8741
8742         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8743                                        new_crtc_state, i) {
8744                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8745
8746                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8747
8748                 if (old_crtc_state->active &&
8749                     (!new_crtc_state->active ||
8750                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8751                         manage_dm_interrupts(adev, acrtc, false);
8752                         dc_stream_release(dm_old_crtc_state->stream);
8753                 }
8754         }
8755
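              /* Recompute vblank timestamping constants for the new modes
               * before the hardware starts generating interrupts for them.
               */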
8756         drm_atomic_helper_calc_timestamping_constants(state);
8757
8758         /* update changed items */
8759         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8760                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8761
8762                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8763                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8764
8765                 DRM_DEBUG_ATOMIC(
8766                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8767                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8768                         "connectors_changed:%d\n",
8769                         acrtc->crtc_id,
8770                         new_crtc_state->enable,
8771                         new_crtc_state->active,
8772                         new_crtc_state->planes_changed,
8773                         new_crtc_state->mode_changed,
8774                         new_crtc_state->active_changed,
8775                         new_crtc_state->connectors_changed);
8776
8777                 /* Disable cursor if disabling crtc */
8778                 if (old_crtc_state->active && !new_crtc_state->active) {
8779                         struct dc_cursor_position position;
8780
8781                         memset(&position, 0, sizeof(position));
8782                         mutex_lock(&dm->dc_lock);
8783                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8784                         mutex_unlock(&dm->dc_lock);
8785                 }
8786
8787                 /* Copy all transient state flags into dc state */
8788                 if (dm_new_crtc_state->stream) {
8789                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8790                                                             dm_new_crtc_state->stream);
8791                 }
8792
8793                 /* handles headless hotplug case, updating new_state and
8794                  * aconnector as needed
8795                  */
8796
8797                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8799                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8800
8801                         if (!dm_new_crtc_state->stream) {
8802                                 /*
8803                                  * This could happen because of issues with
8804                                  * userspace notification delivery.
8805                                  * In this case userspace tries to set a mode
8806                                  * on a display which is in fact disconnected.
8807                                  * dc_sink is NULL on the aconnector in this
8808                                  * case. We expect a mode reset to come soon.
8809                                  *
8810                                  * This can also happen when an unplug occurs
8811                                  * while the resume sequence is still running.
8812                                  *
8813                                  * In this case, we want to pretend we still
8814                                  * have a sink to keep the pipe running so that
8815                                  * hw state is consistent with the sw state
8816                                  */
8817                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8818                                                 __func__, acrtc->base.base.id);
8819                                 continue;
8820                         }
8821
8822                         if (dm_old_crtc_state->stream)
8823                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8824
8825                         pm_runtime_get_noresume(dev->dev);
8826
8827                         acrtc->enabled = true;
8828                         acrtc->hw_mode = new_crtc_state->mode;
8829                         crtc->hwmode = new_crtc_state->mode;
8830                         mode_set_reset_required = true;
8831                 } else if (modereset_required(new_crtc_state)) {
8832                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8833                         /* i.e. reset mode */
8834                         if (dm_old_crtc_state->stream)
8835                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8836
8837                         mode_set_reset_required = true;
8838                 }
8839         } /* for_each_crtc_in_state() */
8840
8841         if (dc_state) {
8842                 /* if there is a mode set or reset, disable eDP PSR */
8843                 if (mode_set_reset_required)
8844                         amdgpu_dm_psr_disable_all(dm);
8845
8846                 dm_enable_per_frame_crtc_master_sync(dc_state);
8847                 mutex_lock(&dm->dc_lock);
8848                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8849 #if defined(CONFIG_DRM_AMD_DC_DCN)
8850                 /* Allow idle optimization when vblank count is 0 for display off */
8851                 if (dm->active_vblank_irq_count == 0)
8852                         dc_allow_idle_optimizations(dm->dc, true);
8853 #endif
8854                 mutex_unlock(&dm->dc_lock);
8855         }
8856
8857         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8858                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8859
8860                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8861
8862                 if (dm_new_crtc_state->stream != NULL) {
8863                         const struct dc_stream_status *status =
8864                                         dc_stream_get_status(dm_new_crtc_state->stream);
8865
8866                         if (!status)
8867                                 status = dc_stream_get_status_from_state(dc_state,
8868                                                                          dm_new_crtc_state->stream);
8869                         if (!status)
8870                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8871                         else
8872                                 acrtc->otg_inst = status->primary_otg_inst;
8873                 }
8874         }
8875 #ifdef CONFIG_DRM_AMD_DC_HDCP
8876         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8877                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8878                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8879                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8880
8881                 new_crtc_state = NULL;
8882
8883                 if (acrtc)
8884                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8885
8886                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8887
8888                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8889                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8890                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8891                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8892                         dm_new_con_state->update_hdcp = true;
8893                         continue;
8894                 }
8895
8896                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8897                         hdcp_update_display(
8898                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8899                                 new_con_state->hdcp_content_type,
8900                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8901         }
8902 #endif
8903
8904         /* Handle connector state changes */
8905         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8906                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8907                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8908                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8909                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8910                 struct dc_stream_update stream_update;
8911                 struct dc_info_packet hdr_packet;
8912                 struct dc_stream_status *status = NULL;
8913                 bool abm_changed, hdr_changed, scaling_changed;
8914
8915                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8916                 memset(&stream_update, 0, sizeof(stream_update));
8917
8918                 if (acrtc) {
8919                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8920                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8921                 }
8922
8923                 /* Skip any modesets/resets */
8924                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8925                         continue;
8926
8927                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8928                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8929
8930                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8931                                                              dm_old_con_state);
8932
8933                 abm_changed = dm_new_crtc_state->abm_level !=
8934                               dm_old_crtc_state->abm_level;
8935
8936                 hdr_changed =
8937                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
8938
8939                 if (!scaling_changed && !abm_changed && !hdr_changed)
8940                         continue;
8941
8942                 stream_update.stream = dm_new_crtc_state->stream;
8943                 if (scaling_changed) {
8944                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8945                                         dm_new_con_state, dm_new_crtc_state->stream);
8946
8947                         stream_update.src = dm_new_crtc_state->stream->src;
8948                         stream_update.dst = dm_new_crtc_state->stream->dst;
8949                 }
8950
8951                 if (abm_changed) {
8952                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8953
8954                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8955                 }
8956
8957                 if (hdr_changed) {
8958                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8959                         stream_update.hdr_static_metadata = &hdr_packet;
8960                 }
8961
8962                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8963                 WARN_ON(!status);
8964                 WARN_ON(!status->plane_count);
8965
8966                 /*
8967                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8968                  * Here we create an empty update on each plane.
8969                  * To fix this, DC should permit updating only stream properties.
8970                  */
8971                 for (j = 0; j < status->plane_count; j++)
8972                         dummy_updates[j].surface = status->plane_states[0];
8973
8974
8976                 dc_commit_updates_for_stream(dm->dc,
8977                                                      dummy_updates,
8978                                                      status->plane_count,
8979                                                      dm_new_crtc_state->stream,
8980                                                      &stream_update,
8981                                                      dc_state);
8982                 mutex_unlock(&dm->dc_lock);
8983         }
8984
8985         /* Count number of newly disabled CRTCs for dropping PM refs later. */
8986         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8987                                       new_crtc_state, i) {
8988                 if (old_crtc_state->active && !new_crtc_state->active)
8989                         crtc_disable_count++;
8990
8991                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8992                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8993
8994                 /* For freesync config update on crtc state and params for irq */
8995                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8996
8997                 /* Handle vrr on->off / off->on transitions */
8998                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8999                                                 dm_new_crtc_state);
9000         }
9001
9002         /**
9003          * Enable interrupts for CRTCs that are newly enabled or went through
9004          * a modeset. It was intentionally deferred until after the front end
9005          * state was modified to wait until the OTG was on and so the IRQ
9006          * handlers didn't access stale or invalid state.
9007          */
9008         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9009                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9010 #ifdef CONFIG_DEBUG_FS
9011                 bool configure_crc = false;
9012                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9013 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9014                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9015 #endif
9016                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9017                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9018                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9019 #endif
9020                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9021
9022                 if (new_crtc_state->active &&
9023                     (!old_crtc_state->active ||
9024                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9025                         dc_stream_retain(dm_new_crtc_state->stream);
9026                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9027                         manage_dm_interrupts(adev, acrtc, true);
9028
9029 #ifdef CONFIG_DEBUG_FS
9030                         /**
9031                          * Frontend may have changed so reapply the CRC capture
9032                          * settings for the stream.
9033                          */
9034                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9035
9036                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9037                                 configure_crc = true;
9038 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9039                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9040                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9041                                         acrtc->dm_irq_params.crc_window.update_win = true;
9042                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9043                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9044                                         crc_rd_wrk->crtc = crtc;
9045                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9046                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9047                                 }
9048 #endif
9049                         }
9050
9051                         if (configure_crc)
9052                                 if (amdgpu_dm_crtc_configure_crc_source(
9053                                         crtc, dm_new_crtc_state, cur_crc_src))
9054                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9055 #endif
9056                 }
9057         }
9058
9059         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9060                 if (new_crtc_state->async_flip)
9061                         wait_for_vblank = false;
9062
9063         /* update planes when needed per crtc */
9064         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9065                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9066
9067                 if (dm_new_crtc_state->stream)
9068                         amdgpu_dm_commit_planes(state, dc_state, dev,
9069                                                 dm, crtc, wait_for_vblank);
9070         }
9071
9072         /* Update audio instances for each connector. */
9073         amdgpu_dm_commit_audio(dev, state);
9074
9075 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
9076         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9077         /* restore the backlight level */
9078         if (dm->backlight_dev)
9079                 amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
9080 #endif
9081         /*
9082          * send vblank event on all events not handled in flip and
9083          * mark consumed event for drm_atomic_helper_commit_hw_done
9084          */
9085         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9086         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9088                 if (new_crtc_state->event)
9089                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9090
9091                 new_crtc_state->event = NULL;
9092         }
9093         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9094
9095         /* Signal HW programming completion */
9096         drm_atomic_helper_commit_hw_done(state);
9097
9098         if (wait_for_vblank)
9099                 drm_atomic_helper_wait_for_flip_done(dev, state);
9100
9101         drm_atomic_helper_cleanup_planes(dev, state);
9102
9103         /* return the stolen vga memory back to VRAM */
9104         if (!adev->mman.keep_stolen_vga_memory)
9105                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9106         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9107
9108         /*
9109          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9110          * so we can put the GPU into runtime suspend if we're not driving any
9111          * displays anymore
9112          */
9113         for (i = 0; i < crtc_disable_count; i++)
9114                 pm_runtime_put_autosuspend(dev->dev);
9115         pm_runtime_mark_last_busy(dev->dev);
9116
9117         if (dc_state_temp)
9118                 dc_release_state(dc_state_temp);
9119 }
9120
9122 static int dm_force_atomic_commit(struct drm_connector *connector)
9123 {
9124         int ret = 0;
9125         struct drm_device *ddev = connector->dev;
9126         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9127         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9128         struct drm_plane *plane = disconnected_acrtc->base.primary;
9129         struct drm_connector_state *conn_state;
9130         struct drm_crtc_state *crtc_state;
9131         struct drm_plane_state *plane_state;
9132
9133         if (!state)
9134                 return -ENOMEM;
9135
9136         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9137
9138         /* Construct an atomic state to restore previous display setting */
9139
9140         /*
9141          * Attach connectors to drm_atomic_state
9142          */
9143         conn_state = drm_atomic_get_connector_state(state, connector);
9144
9145         ret = PTR_ERR_OR_ZERO(conn_state);
9146         if (ret)
9147                 goto out;
9148
9149         /* Attach crtc to drm_atomic_state*/
9150         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9151
9152         ret = PTR_ERR_OR_ZERO(crtc_state);
9153         if (ret)
9154                 goto out;
9155
9156         /* force a restore */
9157         crtc_state->mode_changed = true;
9158
9159         /* Attach plane to drm_atomic_state */
9160         plane_state = drm_atomic_get_plane_state(state, plane);
9161
9162         ret = PTR_ERR_OR_ZERO(plane_state);
9163         if (ret)
9164                 goto out;
9165
9166         /* Call commit internally with the state we just constructed */
9167         ret = drm_atomic_commit(state);
9168
9169 out:
9170         drm_atomic_state_put(state);
9171         if (ret)
9172                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9173
9174         return ret;
9175 }
9176
9177 /*
9178  * This function handles all cases when a set mode does not come upon hotplug.
9179  * This includes when a display is unplugged then plugged back into the
9180  * same port and when running without usermode desktop manager support.
9181  */
9182 void dm_restore_drm_connector_state(struct drm_device *dev,
9183                                     struct drm_connector *connector)
9184 {
9185         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9186         struct amdgpu_crtc *disconnected_acrtc;
9187         struct dm_crtc_state *acrtc_state;
9188
9189         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9190                 return;
9191
9192         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9193         if (!disconnected_acrtc)
9194                 return;
9195
9196         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9197         if (!acrtc_state->stream)
9198                 return;
9199
9200         /*
9201          * If the previous sink is not released and different from the current,
9202          * we deduce we are in a state where we cannot rely on a usermode
9203          * call to turn on the display, so we do it here.
9204          */
9205         if (acrtc_state->stream->sink != aconnector->dc_sink)
9206                 dm_force_atomic_commit(&aconnector->base);
9207 }
9208
9209 /*
9210  * Grabs all modesetting locks to serialize against any blocking commits,
9211  * and waits for completion of all non-blocking commits.
9212  */
9213 static int do_aquire_global_lock(struct drm_device *dev,
9214                                  struct drm_atomic_state *state)
9215 {
9216         struct drm_crtc *crtc;
9217         struct drm_crtc_commit *commit;
9218         long ret;
9219
9220         /*
9221          * Adding all modeset locks to acquire_ctx will
9222          * ensure that when the framework releases it, the
9223          * extra locks we are taking here will also get released.
9224          */
9225         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9226         if (ret)
9227                 return ret;
9228
9229         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9230                 spin_lock(&crtc->commit_lock);
9231                 commit = list_first_entry_or_null(&crtc->commit_list,
9232                                 struct drm_crtc_commit, commit_entry);
9233                 if (commit)
9234                         drm_crtc_commit_get(commit);
9235                 spin_unlock(&crtc->commit_lock);
9236
9237                 if (!commit)
9238                         continue;
9239
9240                 /*
9241                  * Make sure all pending HW programming completed and
9242                  * page flips done
9243                  */
9244                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9245
9246                 if (ret > 0)
9247                         ret = wait_for_completion_interruptible_timeout(
9248                                         &commit->flip_done, 10*HZ);
9249
9250                 if (ret == 0)
9251                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9252                                   crtc->base.id, crtc->name);
9253
9254                 drm_crtc_commit_put(commit);
9255         }
9256
9257         return ret < 0 ? ret : 0;
9258 }
9259
9260 static void get_freesync_config_for_crtc(
9261         struct dm_crtc_state *new_crtc_state,
9262         struct dm_connector_state *new_con_state)
9263 {
9264         struct mod_freesync_config config = {0};
9265         struct amdgpu_dm_connector *aconnector =
9266                         to_amdgpu_dm_connector(new_con_state->base.connector);
9267         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9268         int vrefresh = drm_mode_vrefresh(mode);
9269         bool fs_vid_mode = false;
9270
9271         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9272                                         vrefresh >= aconnector->min_vfreq &&
9273                                         vrefresh <= aconnector->max_vfreq;
9274
9275         if (new_crtc_state->vrr_supported) {
9276                 new_crtc_state->stream->ignore_msa_timing_param = true;
9277                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9278
9279                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9280                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9281                 config.vsif_supported = true;
9282                 config.btr = true;
9283
9284                 if (fs_vid_mode) {
9285                         config.state = VRR_STATE_ACTIVE_FIXED;
9286                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9287                         goto out;
9288                 } else if (new_crtc_state->base.vrr_enabled) {
9289                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9290                 } else {
9291                         config.state = VRR_STATE_INACTIVE;
9292                 }
9293         }
9294 out:
9295         new_crtc_state->freesync_config = config;
9296 }
9297
9298 static void reset_freesync_config_for_crtc(
9299         struct dm_crtc_state *new_crtc_state)
9300 {
9301         new_crtc_state->vrr_supported = false;
9302
9303         memset(&new_crtc_state->vrr_infopacket, 0,
9304                sizeof(new_crtc_state->vrr_infopacket));
9305 }
9306
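/*
 * Returns true only when the two modes differ solely in their vertical
 * blanking: vtotal, vsync_start and vsync_end change while every horizontal
 * parameter and the vsync pulse width stay the same. As an illustrative
 * (hypothetical) example, a 1920x1080 mode with vsync_start 1084,
 * vsync_end 1089 and vtotal 1125 stretched to vsync_start 1365,
 * vsync_end 1370 and vtotal 1406 keeps its 5-line pulse width and only
 * grows the vertical front porch, which is what FreeSync video modes need.
 */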
9307 static bool
9308 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9309                                  struct drm_crtc_state *new_crtc_state)
9310 {
9311         struct drm_display_mode old_mode, new_mode;
9312
9313         if (!old_crtc_state || !new_crtc_state)
9314                 return false;
9315
9316         old_mode = old_crtc_state->mode;
9317         new_mode = new_crtc_state->mode;
9318
9319         if (old_mode.clock       == new_mode.clock &&
9320             old_mode.hdisplay    == new_mode.hdisplay &&
9321             old_mode.vdisplay    == new_mode.vdisplay &&
9322             old_mode.htotal      == new_mode.htotal &&
9323             old_mode.vtotal      != new_mode.vtotal &&
9324             old_mode.hsync_start == new_mode.hsync_start &&
9325             old_mode.vsync_start != new_mode.vsync_start &&
9326             old_mode.hsync_end   == new_mode.hsync_end &&
9327             old_mode.vsync_end   != new_mode.vsync_end &&
9328             old_mode.hskew       == new_mode.hskew &&
9329             old_mode.vscan       == new_mode.vscan &&
9330             (old_mode.vsync_end - old_mode.vsync_start) ==
9331             (new_mode.vsync_end - new_mode.vsync_start))
9332                 return true;
9333
9334         return false;
9335 }
9336
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9338         uint64_t num, den, res;
9339         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9340
9341         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9342
9343         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9344         den = (unsigned long long)new_crtc_state->mode.htotal *
9345               (unsigned long long)new_crtc_state->mode.vtotal;
9346
9347         res = div_u64(num, den);
9348         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9349 }
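
/*
 * Worked example for the fixed-refresh math above, using illustrative
 * numbers for a typical 1920x1080@60 CEA mode (clock = 148500 kHz,
 * htotal = 2200, vtotal = 1125):
 *
 *	num = 148500 * 1000 * 1000000
 *	den = 2200 * 1125 = 2475000
 *	res = num / den = 60000000 uHz, i.e. a 60 Hz fixed refresh rate.
 */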
9350
9351 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9352                                 struct drm_atomic_state *state,
9353                                 struct drm_crtc *crtc,
9354                                 struct drm_crtc_state *old_crtc_state,
9355                                 struct drm_crtc_state *new_crtc_state,
9356                                 bool enable,
9357                                 bool *lock_and_validation_needed)
9358 {
9359         struct dm_atomic_state *dm_state = NULL;
9360         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9361         struct dc_stream_state *new_stream;
9362         int ret = 0;
9363
9364         /*
9365          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9366          * update changed items
9367          */
9368         struct amdgpu_crtc *acrtc = NULL;
9369         struct amdgpu_dm_connector *aconnector = NULL;
9370         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9371         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9372
9373         new_stream = NULL;
9374
9375         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9376         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9377         acrtc = to_amdgpu_crtc(crtc);
9378         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9379
9380         /* TODO This hack should go away */
9381         if (aconnector && enable) {
9382                 /* Make sure fake sink is created in plug-in scenario */
9383                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9384                                                             &aconnector->base);
9385                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9386                                                             &aconnector->base);
9387
9388                 if (IS_ERR(drm_new_conn_state)) {
                        ret = PTR_ERR(drm_new_conn_state);
9390                         goto fail;
9391                 }
9392
9393                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9394                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9395
9396                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9397                         goto skip_modeset;
9398
9399                 new_stream = create_validate_stream_for_sink(aconnector,
9400                                                              &new_crtc_state->mode,
9401                                                              dm_new_conn_state,
9402                                                              dm_old_crtc_state->stream);
9403
9404                 /*
                 * We can have no stream on ACTION_SET if a display
                 * was disconnected during S3. In this case it is not an
                 * error: the OS will be updated after detection and
                 * will do the right thing on the next atomic commit.
9409                  */
9410
9411                 if (!new_stream) {
9412                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9413                                         __func__, acrtc->base.base.id);
9414                         ret = -ENOMEM;
9415                         goto fail;
9416                 }
9417
9418                 /*
9419                  * TODO: Check VSDB bits to decide whether this should
9420                  * be enabled or not.
9421                  */
9422                 new_stream->triggered_crtc_reset.enabled =
9423                         dm->force_timing_sync;
9424
9425                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9426
9427                 ret = fill_hdr_info_packet(drm_new_conn_state,
9428                                            &new_stream->hdr_static_metadata);
9429                 if (ret)
9430                         goto fail;
9431
9432                 /*
9433                  * If we already removed the old stream from the context
9434                  * (and set the new stream to NULL) then we can't reuse
9435                  * the old stream even if the stream and scaling are unchanged.
9436                  * We'll hit the BUG_ON and black screen.
9437                  *
9438                  * TODO: Refactor this function to allow this check to work
9439                  * in all conditions.
9440                  */
9441                 if (amdgpu_freesync_vid_mode &&
9442                     dm_new_crtc_state->stream &&
9443                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9444                         goto skip_modeset;
9445
9446                 if (dm_new_crtc_state->stream &&
9447                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9448                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9449                         new_crtc_state->mode_changed = false;
                        DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
9451                                          new_crtc_state->mode_changed);
9452                 }
9453         }
9454
9455         /* mode_changed flag may get updated above, need to check again */
9456         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9457                 goto skip_modeset;
9458
9459         DRM_DEBUG_ATOMIC(
9460                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                "planes_changed:%d, mode_changed:%d, active_changed:%d, "
                "connectors_changed:%d\n",
9463                 acrtc->crtc_id,
9464                 new_crtc_state->enable,
9465                 new_crtc_state->active,
9466                 new_crtc_state->planes_changed,
9467                 new_crtc_state->mode_changed,
9468                 new_crtc_state->active_changed,
9469                 new_crtc_state->connectors_changed);
9470
9471         /* Remove stream for any changed/disabled CRTC */
9472         if (!enable) {
9473
9474                 if (!dm_old_crtc_state->stream)
9475                         goto skip_modeset;
9476
9477                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9478                     is_timing_unchanged_for_freesync(new_crtc_state,
9479                                                      old_crtc_state)) {
9480                         new_crtc_state->mode_changed = false;
9481                         DRM_DEBUG_DRIVER(
                                "Mode change not required for front porch change, "
                                "setting mode_changed to %d\n",
9484                                 new_crtc_state->mode_changed);
9485
9486                         set_freesync_fixed_config(dm_new_crtc_state);
9487
9488                         goto skip_modeset;
9489                 } else if (amdgpu_freesync_vid_mode && aconnector &&
9490                            is_freesync_video_mode(&new_crtc_state->mode,
9491                                                   aconnector)) {
9492                         set_freesync_fixed_config(dm_new_crtc_state);
9493                 }
9494
9495                 ret = dm_atomic_get_state(state, &dm_state);
9496                 if (ret)
9497                         goto fail;
9498
9499                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9500                                 crtc->base.id);
9501
9502                 /* i.e. reset mode */
9503                 if (dc_remove_stream_from_ctx(
9504                                 dm->dc,
9505                                 dm_state->context,
9506                                 dm_old_crtc_state->stream) != DC_OK) {
9507                         ret = -EINVAL;
9508                         goto fail;
9509                 }
9510
9511                 dc_stream_release(dm_old_crtc_state->stream);
9512                 dm_new_crtc_state->stream = NULL;
9513
9514                 reset_freesync_config_for_crtc(dm_new_crtc_state);
9515
9516                 *lock_and_validation_needed = true;
9517
9518         } else {/* Add stream for any updated/enabled CRTC */
9519                 /*
                 * Quick fix to prevent a NULL pointer dereference on new_stream when
                 * added MST connectors are not found in the existing crtc_state in chained mode.
                 * TODO: need to dig out the root cause of this.
9523                  */
9524                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9525                         goto skip_modeset;
9526
9527                 if (modereset_required(new_crtc_state))
9528                         goto skip_modeset;
9529
9530                 if (modeset_required(new_crtc_state, new_stream,
9531                                      dm_old_crtc_state->stream)) {
9532
9533                         WARN_ON(dm_new_crtc_state->stream);
9534
9535                         ret = dm_atomic_get_state(state, &dm_state);
9536                         if (ret)
9537                                 goto fail;
9538
9539                         dm_new_crtc_state->stream = new_stream;
9540
9541                         dc_stream_retain(new_stream);
9542
9543                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9544                                          crtc->base.id);
9545
9546                         if (dc_add_stream_to_ctx(
9547                                         dm->dc,
9548                                         dm_state->context,
9549                                         dm_new_crtc_state->stream) != DC_OK) {
9550                                 ret = -EINVAL;
9551                                 goto fail;
9552                         }
9553
9554                         *lock_and_validation_needed = true;
9555                 }
9556         }
9557
9558 skip_modeset:
9559         /* Release extra reference */
9560         if (new_stream)
                dc_stream_release(new_stream);
9562
9563         /*
9564          * We want to do dc stream updates that do not require a
9565          * full modeset below.
9566          */
9567         if (!(enable && aconnector && new_crtc_state->active))
9568                 return 0;
9569         /*
9570          * Given above conditions, the dc state cannot be NULL because:
9571          * 1. We're in the process of enabling CRTCs (just been added
9572          *    to the dc context, or already is on the context)
9573          * 2. Has a valid connector attached, and
9574          * 3. Is currently active and enabled.
9575          * => The dc stream state currently exists.
9576          */
9577         BUG_ON(dm_new_crtc_state->stream == NULL);
9578
9579         /* Scaling or underscan settings */
9580         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9581                 update_stream_scaling_settings(
9582                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9583
9584         /* ABM settings */
9585         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9586
9587         /*
9588          * Color management settings. We also update color properties
9589          * when a modeset is needed, to ensure it gets reprogrammed.
9590          */
9591         if (dm_new_crtc_state->base.color_mgmt_changed ||
9592             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9593                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9594                 if (ret)
9595                         goto fail;
9596         }
9597
9598         /* Update Freesync settings. */
9599         get_freesync_config_for_crtc(dm_new_crtc_state,
9600                                      dm_new_conn_state);
9601
9602         return ret;
9603
9604 fail:
9605         if (new_stream)
9606                 dc_stream_release(new_stream);
9607         return ret;
9608 }
9609
9610 static bool should_reset_plane(struct drm_atomic_state *state,
9611                                struct drm_plane *plane,
9612                                struct drm_plane_state *old_plane_state,
9613                                struct drm_plane_state *new_plane_state)
9614 {
9615         struct drm_plane *other;
9616         struct drm_plane_state *old_other_state, *new_other_state;
9617         struct drm_crtc_state *new_crtc_state;
9618         int i;
9619
9620         /*
9621          * TODO: Remove this hack once the checks below are sufficient
         * to determine when we need to reset all the planes on
9623          * the stream.
9624          */
9625         if (state->allow_modeset)
9626                 return true;
9627
9628         /* Exit early if we know that we're adding or removing the plane. */
9629         if (old_plane_state->crtc != new_plane_state->crtc)
9630                 return true;
9631
9632         /* old crtc == new_crtc == NULL, plane not in context. */
9633         if (!new_plane_state->crtc)
9634                 return false;
9635
9636         new_crtc_state =
9637                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9638
9639         if (!new_crtc_state)
9640                 return true;
9641
9642         /* CRTC Degamma changes currently require us to recreate planes. */
9643         if (new_crtc_state->color_mgmt_changed)
9644                 return true;
9645
9646         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9647                 return true;
9648
9649         /*
9650          * If there are any new primary or overlay planes being added or
9651          * removed then the z-order can potentially change. To ensure
9652          * correct z-order and pipe acquisition the current DC architecture
9653          * requires us to remove and recreate all existing planes.
9654          *
9655          * TODO: Come up with a more elegant solution for this.
9656          */
9657         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
                struct amdgpu_framebuffer *old_afb, *new_afb;

                if (other->type == DRM_PLANE_TYPE_CURSOR)
9660                         continue;
9661
9662                 if (old_other_state->crtc != new_plane_state->crtc &&
9663                     new_other_state->crtc != new_plane_state->crtc)
9664                         continue;
9665
9666                 if (old_other_state->crtc != new_other_state->crtc)
9667                         return true;
9668
9669                 /* Src/dst size and scaling updates. */
9670                 if (old_other_state->src_w != new_other_state->src_w ||
9671                     old_other_state->src_h != new_other_state->src_h ||
9672                     old_other_state->crtc_w != new_other_state->crtc_w ||
9673                     old_other_state->crtc_h != new_other_state->crtc_h)
9674                         return true;
9675
9676                 /* Rotation / mirroring updates. */
9677                 if (old_other_state->rotation != new_other_state->rotation)
9678                         return true;
9679
9680                 /* Blending updates. */
9681                 if (old_other_state->pixel_blend_mode !=
9682                     new_other_state->pixel_blend_mode)
9683                         return true;
9684
9685                 /* Alpha updates. */
9686                 if (old_other_state->alpha != new_other_state->alpha)
9687                         return true;
9688
9689                 /* Colorspace changes. */
9690                 if (old_other_state->color_range != new_other_state->color_range ||
9691                     old_other_state->color_encoding != new_other_state->color_encoding)
9692                         return true;
9693
9694                 /* Framebuffer checks fall at the end. */
9695                 if (!old_other_state->fb || !new_other_state->fb)
9696                         continue;
9697
9698                 /* Pixel format changes can require bandwidth updates. */
9699                 if (old_other_state->fb->format != new_other_state->fb->format)
9700                         return true;
9701
9702                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9703                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9704
9705                 /* Tiling and DCC changes also require bandwidth updates. */
9706                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9707                     old_afb->base.modifier != new_afb->base.modifier)
9708                         return true;
9709         }
9710
9711         return false;
9712 }
9713
9714 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9715                               struct drm_plane_state *new_plane_state,
9716                               struct drm_framebuffer *fb)
9717 {
9718         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9719         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9720         unsigned int pitch;
9721         bool linear;
9722
9723         if (fb->width > new_acrtc->max_cursor_width ||
9724             fb->height > new_acrtc->max_cursor_height) {
9725                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9726                                  new_plane_state->fb->width,
9727                                  new_plane_state->fb->height);
9728                 return -EINVAL;
9729         }
9730         if (new_plane_state->src_w != fb->width << 16 ||
9731             new_plane_state->src_h != fb->height << 16) {
9732                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9733                 return -EINVAL;
9734         }
9735
9736         /* Pitch in pixels */
9737         pitch = fb->pitches[0] / fb->format->cpp[0];
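        /*
         * Illustrative example (hypothetical FB): a 64x64 ARGB8888 cursor FB
         * has pitches[0] = 256 bytes and cpp[0] = 4 bytes per pixel, giving
         * a pitch of 64 pixels, which matches fb->width as required below.
         */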
9738
9739         if (fb->width != pitch) {
                DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9741                                  fb->width, pitch);
9742                 return -EINVAL;
9743         }
9744
9745         switch (pitch) {
9746         case 64:
9747         case 128:
9748         case 256:
9749                 /* FB pitch is supported by cursor plane */
9750                 break;
9751         default:
9752                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9753                 return -EINVAL;
9754         }
9755
        /*
         * Core DRM takes care of checking FB modifiers, so we only need to
         * check tiling flags when the FB doesn't have a modifier.
         */
9758         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9759                 if (adev->family < AMDGPU_FAMILY_AI) {
9760                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9761                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9762                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9763                 } else {
9764                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9765                 }
9766                 if (!linear) {
                        DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9768                         return -EINVAL;
9769                 }
9770         }
9771
9772         return 0;
9773 }
9774
9775 static int dm_update_plane_state(struct dc *dc,
9776                                  struct drm_atomic_state *state,
9777                                  struct drm_plane *plane,
9778                                  struct drm_plane_state *old_plane_state,
9779                                  struct drm_plane_state *new_plane_state,
9780                                  bool enable,
9781                                  bool *lock_and_validation_needed)
9782 {
9783
9784         struct dm_atomic_state *dm_state = NULL;
9785         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9786         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9787         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9788         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9789         struct amdgpu_crtc *new_acrtc;
9790         bool needs_reset;
9791         int ret = 0;
9792
9793
9794         new_plane_crtc = new_plane_state->crtc;
9795         old_plane_crtc = old_plane_state->crtc;
9796         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9797         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9798
9799         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9800                 if (!enable || !new_plane_crtc ||
9801                         drm_atomic_plane_disabling(plane->state, new_plane_state))
9802                         return 0;
9803
9804                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9805
9806                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9807                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9808                         return -EINVAL;
9809                 }
9810
9811                 if (new_plane_state->fb) {
9812                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9813                                                  new_plane_state->fb);
9814                         if (ret)
9815                                 return ret;
9816                 }
9817
9818                 return 0;
9819         }
9820
9821         needs_reset = should_reset_plane(state, plane, old_plane_state,
9822                                          new_plane_state);
9823
9824         /* Remove any changed/removed planes */
9825         if (!enable) {
9826                 if (!needs_reset)
9827                         return 0;
9828
9829                 if (!old_plane_crtc)
9830                         return 0;
9831
9832                 old_crtc_state = drm_atomic_get_old_crtc_state(
9833                                 state, old_plane_crtc);
9834                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9835
9836                 if (!dm_old_crtc_state->stream)
9837                         return 0;
9838
9839                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9840                                 plane->base.id, old_plane_crtc->base.id);
9841
9842                 ret = dm_atomic_get_state(state, &dm_state);
9843                 if (ret)
9844                         return ret;
9845
9846                 if (!dc_remove_plane_from_context(
9847                                 dc,
9848                                 dm_old_crtc_state->stream,
9849                                 dm_old_plane_state->dc_state,
9850                                 dm_state->context)) {
9851
9852                         return -EINVAL;
9853                 }
9854
9855
9856                 dc_plane_state_release(dm_old_plane_state->dc_state);
9857                 dm_new_plane_state->dc_state = NULL;
9858
9859                 *lock_and_validation_needed = true;
9860
9861         } else { /* Add new planes */
9862                 struct dc_plane_state *dc_new_plane_state;
9863
9864                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9865                         return 0;
9866
9867                 if (!new_plane_crtc)
9868                         return 0;
9869
9870                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9871                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9872
9873                 if (!dm_new_crtc_state->stream)
9874                         return 0;
9875
9876                 if (!needs_reset)
9877                         return 0;
9878
9879                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9880                 if (ret)
9881                         return ret;
9882
9883                 WARN_ON(dm_new_plane_state->dc_state);
9884
9885                 dc_new_plane_state = dc_create_plane_state(dc);
9886                 if (!dc_new_plane_state)
9887                         return -ENOMEM;
9888
9889                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9890                                  plane->base.id, new_plane_crtc->base.id);
9891
9892                 ret = fill_dc_plane_attributes(
9893                         drm_to_adev(new_plane_crtc->dev),
9894                         dc_new_plane_state,
9895                         new_plane_state,
9896                         new_crtc_state);
9897                 if (ret) {
9898                         dc_plane_state_release(dc_new_plane_state);
9899                         return ret;
9900                 }
9901
9902                 ret = dm_atomic_get_state(state, &dm_state);
9903                 if (ret) {
9904                         dc_plane_state_release(dc_new_plane_state);
9905                         return ret;
9906                 }
9907
9908                 /*
9909                  * Any atomic check errors that occur after this will
9910                  * not need a release. The plane state will be attached
9911                  * to the stream, and therefore part of the atomic
9912                  * state. It'll be released when the atomic state is
9913                  * cleaned.
9914                  */
9915                 if (!dc_add_plane_to_context(
9916                                 dc,
9917                                 dm_new_crtc_state->stream,
9918                                 dc_new_plane_state,
9919                                 dm_state->context)) {
9920
9921                         dc_plane_state_release(dc_new_plane_state);
9922                         return -EINVAL;
9923                 }
9924
9925                 dm_new_plane_state->dc_state = dc_new_plane_state;
9926
9927                 /* Tell DC to do a full surface update every time there
9928                  * is a plane change. Inefficient, but works for now.
9929                  */
9930                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9931
9932                 *lock_and_validation_needed = true;
9933         }
9934
9935
9936         return ret;
9937 }
9938
9939 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9940                                 struct drm_crtc *crtc,
9941                                 struct drm_crtc_state *new_crtc_state)
9942 {
9943         struct drm_plane_state *new_cursor_state, *new_primary_state;
9944         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9945
        /*
         * On DCE and DCN there is no dedicated hardware cursor plane. We get a
         * cursor per pipe, but it's going to inherit the scaling and
         * positioning from the underlying pipe. Check that the cursor plane's
         * blending properties match the primary plane's.
         */
9950
9951         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9952         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9953         if (!new_cursor_state || !new_primary_state ||
9954             !new_cursor_state->fb || !new_primary_state->fb) {
9955                 return 0;
9956         }
9957
9958         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9959                          (new_cursor_state->src_w >> 16);
9960         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9961                          (new_cursor_state->src_h >> 16);
9962
9963         primary_scale_w = new_primary_state->crtc_w * 1000 /
9964                          (new_primary_state->src_w >> 16);
9965         primary_scale_h = new_primary_state->crtc_h * 1000 /
9966                          (new_primary_state->src_h >> 16);
9967
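        /*
         * Illustrative example (hypothetical numbers): a 64x64 cursor shown
         * at 64x64 has a scale of (64 * 1000) / 64 = 1000, while a 3840x2160
         * primary scanned out at 1920x1080 has (1920 * 1000) / 3840 = 500;
         * the mismatch is rejected below.
         */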
9968         if (cursor_scale_w != primary_scale_w ||
9969             cursor_scale_h != primary_scale_h) {
9970                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9971                 return -EINVAL;
9972         }
9973
9974         return 0;
9975 }
9976
9977 #if defined(CONFIG_DRM_AMD_DC_DCN)
9978 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9979 {
9980         struct drm_connector *connector;
9981         struct drm_connector_state *conn_state;
9982         struct amdgpu_dm_connector *aconnector = NULL;
        int i;

        for_each_new_connector_in_state(state, connector, conn_state, i) {
9985                 if (conn_state->crtc != crtc)
9986                         continue;
9987
9988                 aconnector = to_amdgpu_dm_connector(connector);
9989                 if (!aconnector->port || !aconnector->mst_port)
9990                         aconnector = NULL;
9991                 else
9992                         break;
9993         }
9994
9995         if (!aconnector)
9996                 return 0;
9997
9998         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9999 }
10000 #endif
10001
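/*
 * The cursor is handled per-pipe and inherits scaling and positioning from
 * the pipe underneath it, so while a hardware cursor is in use an enabled
 * overlay must fully cover the primary plane, or the cursor can be drawn
 * incorrectly over the uncovered region. Illustrative example (hypothetical
 * geometry): a 1920x1080 primary at (0,0) underneath a 1280x720 overlay at
 * (0,0) fails the bounds check below, since 0 + 1920 > 0 + 1280.
 */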
10002 static int validate_overlay(struct drm_atomic_state *state)
10003 {
10004         int i;
10005         struct drm_plane *plane;
10006         struct drm_plane_state *old_plane_state, *new_plane_state;
10007         struct drm_plane_state *primary_state, *overlay_state = NULL;
10008
        /* Find the new state of the overlay plane, if any; bail if it is being disabled */
10010         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10011                 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10012                         if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10013                                 return 0;
10014
10015                         overlay_state = new_plane_state;
10016                         continue;
10017                 }
10018         }
10019
10020         /* check if we're making changes to the overlay plane */
10021         if (!overlay_state)
10022                 return 0;
10023
10024         /* check if overlay plane is enabled */
10025         if (!overlay_state->crtc)
10026                 return 0;
10027
10028         /* find the primary plane for the CRTC that the overlay is enabled on */
10029         primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10030         if (IS_ERR(primary_state))
10031                 return PTR_ERR(primary_state);
10032
10033         /* check if primary plane is enabled */
10034         if (!primary_state->crtc)
10035                 return 0;
10036
10037         /* Perform the bounds check to ensure the overlay plane covers the primary */
10038         if (primary_state->crtc_x < overlay_state->crtc_x ||
10039             primary_state->crtc_y < overlay_state->crtc_y ||
10040             primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10041             primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10042                 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10043                 return -EINVAL;
10044         }
10045
10046         return 0;
10047 }
10048
10049 /**
10050  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10051  * @dev: The DRM device
10052  * @state: The atomic state to commit
10053  *
10054  * Validate that the given atomic state is programmable by DC into hardware.
10055  * This involves constructing a &struct dc_state reflecting the new hardware
10056  * state we wish to commit, then querying DC to see if it is programmable. It's
10057  * important not to modify the existing DC state. Otherwise, atomic_check
10058  * may unexpectedly commit hardware changes.
10059  *
10060  * When validating the DC state, it's important that the right locks are
10061  * acquired. For full updates case which removes/adds/updates streams on one
10062  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10063  * that any such full update commit will wait for completion of any outstanding
10064  * flip using DRMs synchronization events.
10065  *
10066  * Note that DM adds the affected connectors for all CRTCs in state, when that
10067  * might not seem necessary. This is because DC stream creation requires the
10068  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10069  * be possible but non-trivial - a possible TODO item.
10070  *
 * Return: 0 on success, negative error code on failure.
10072  */
10073 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10074                                   struct drm_atomic_state *state)
10075 {
10076         struct amdgpu_device *adev = drm_to_adev(dev);
10077         struct dm_atomic_state *dm_state = NULL;
10078         struct dc *dc = adev->dm.dc;
10079         struct drm_connector *connector;
10080         struct drm_connector_state *old_con_state, *new_con_state;
10081         struct drm_crtc *crtc;
10082         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10083         struct drm_plane *plane;
10084         struct drm_plane_state *old_plane_state, *new_plane_state;
10085         enum dc_status status;
10086         int ret, i;
10087         bool lock_and_validation_needed = false;
10088         struct dm_crtc_state *dm_old_crtc_state;
10089
10090         trace_amdgpu_dm_atomic_check_begin(state);
10091
10092         ret = drm_atomic_helper_check_modeset(dev, state);
10093         if (ret)
10094                 goto fail;
10095
10096         /* Check connector changes */
10097         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10098                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10099                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10100
10101                 /* Skip connectors that are disabled or part of modeset already. */
10102                 if (!old_con_state->crtc && !new_con_state->crtc)
10103                         continue;
10104
10105                 if (!new_con_state->crtc)
10106                         continue;
10107
10108                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10109                 if (IS_ERR(new_crtc_state)) {
10110                         ret = PTR_ERR(new_crtc_state);
10111                         goto fail;
10112                 }
10113
10114                 if (dm_old_con_state->abm_level !=
10115                     dm_new_con_state->abm_level)
10116                         new_crtc_state->connectors_changed = true;
10117         }
10118
10119 #if defined(CONFIG_DRM_AMD_DC_DCN)
10120         if (dc_resource_is_dsc_encoding_supported(dc)) {
10121                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10122                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10123                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10124                                 if (ret)
10125                                         goto fail;
10126                         }
10127                 }
10128         }
10129 #endif
10130         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10131                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10132
10133                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10134                     !new_crtc_state->color_mgmt_changed &&
10135                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
                    !dm_old_crtc_state->dsc_force_changed)
10137                         continue;
10138
10139                 if (!new_crtc_state->enable)
10140                         continue;
10141
10142                 ret = drm_atomic_add_affected_connectors(state, crtc);
10143                 if (ret)
                        goto fail;
10145
10146                 ret = drm_atomic_add_affected_planes(state, crtc);
10147                 if (ret)
10148                         goto fail;
10149
10150                 if (dm_old_crtc_state->dsc_force_changed)
10151                         new_crtc_state->mode_changed = true;
10152         }
10153
10154         /*
10155          * Add all primary and overlay planes on the CRTC to the state
10156          * whenever a plane is enabled to maintain correct z-ordering
10157          * and to enable fast surface updates.
10158          */
10159         drm_for_each_crtc(crtc, dev) {
10160                 bool modified = false;
10161
10162                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10163                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10164                                 continue;
10165
10166                         if (new_plane_state->crtc == crtc ||
10167                             old_plane_state->crtc == crtc) {
10168                                 modified = true;
10169                                 break;
10170                         }
10171                 }
10172
10173                 if (!modified)
10174                         continue;
10175
10176                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10177                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10178                                 continue;
10179
10180                         new_plane_state =
10181                                 drm_atomic_get_plane_state(state, plane);
10182
10183                         if (IS_ERR(new_plane_state)) {
10184                                 ret = PTR_ERR(new_plane_state);
10185                                 goto fail;
10186                         }
10187                 }
10188         }
10189
        /* Remove existing planes if they are modified */
10191         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10192                 ret = dm_update_plane_state(dc, state, plane,
10193                                             old_plane_state,
10194                                             new_plane_state,
10195                                             false,
10196                                             &lock_and_validation_needed);
10197                 if (ret)
10198                         goto fail;
10199         }
10200
10201         /* Disable all crtcs which require disable */
10202         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10203                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10204                                            old_crtc_state,
10205                                            new_crtc_state,
10206                                            false,
10207                                            &lock_and_validation_needed);
10208                 if (ret)
10209                         goto fail;
10210         }
10211
10212         /* Enable all crtcs which require enable */
10213         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10214                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10215                                            old_crtc_state,
10216                                            new_crtc_state,
10217                                            true,
10218                                            &lock_and_validation_needed);
10219                 if (ret)
10220                         goto fail;
10221         }
10222
10223         ret = validate_overlay(state);
10224         if (ret)
10225                 goto fail;
10226
10227         /* Add new/modified planes */
10228         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10229                 ret = dm_update_plane_state(dc, state, plane,
10230                                             old_plane_state,
10231                                             new_plane_state,
10232                                             true,
10233                                             &lock_and_validation_needed);
10234                 if (ret)
10235                         goto fail;
10236         }
10237
10238         /* Run this here since we want to validate the streams we created */
10239         ret = drm_atomic_helper_check_planes(dev, state);
10240         if (ret)
10241                 goto fail;
10242
10243         /* Check cursor planes scaling */
10244         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10245                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10246                 if (ret)
10247                         goto fail;
10248         }
10249
10250         if (state->legacy_cursor_update) {
10251                 /*
10252                  * This is a fast cursor update coming from the plane update
10253                  * helper, check if it can be done asynchronously for better
10254                  * performance.
10255                  */
10256                 state->async_update =
10257                         !drm_atomic_helper_async_check(dev, state);
10258
10259                 /*
10260                  * Skip the remaining global validation if this is an async
10261                  * update. Cursor updates can be done without affecting
10262                  * state or bandwidth calcs and this avoids the performance
10263                  * penalty of locking the private state object and
10264                  * allocating a new dc_state.
10265                  */
10266                 if (state->async_update)
10267                         return 0;
10268         }
10269
        /* Check scaling and underscan changes */
        /*
         * TODO: Removed scaling changes validation due to inability to commit
         * a new stream into context without causing a full reset. Need to
         * decide how to handle.
         */
10275         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10276                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10277                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10278                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10279
10280                 /* Skip any modesets/resets */
10281                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10282                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10283                         continue;
10284
                /* Skip anything that is not a scaling or underscan change */
10286                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10287                         continue;
10288
10289                 lock_and_validation_needed = true;
10290         }
10291
        /*
10293          * Streams and planes are reset when there are changes that affect
10294          * bandwidth. Anything that affects bandwidth needs to go through
10295          * DC global validation to ensure that the configuration can be applied
10296          * to hardware.
10297          *
10298          * We have to currently stall out here in atomic_check for outstanding
10299          * commits to finish in this case because our IRQ handlers reference
10300          * DRM state directly - we can end up disabling interrupts too early
10301          * if we don't.
10302          *
10303          * TODO: Remove this stall and drop DM state private objects.
10304          */
10305         if (lock_and_validation_needed) {
10306                 ret = dm_atomic_get_state(state, &dm_state);
10307                 if (ret)
10308                         goto fail;
10309
                ret = do_acquire_global_lock(dev, state);
10311                 if (ret)
10312                         goto fail;
10313
10314 #if defined(CONFIG_DRM_AMD_DC_DCN)
                if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
                        ret = -EINVAL;
                        goto fail;
                }
10317
10318                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10319                 if (ret)
10320                         goto fail;
10321 #endif
10322
10323                 /*
10324                  * Perform validation of MST topology in the state:
10325                  * We need to perform MST atomic check before calling
10326                  * dc_validate_global_state(), or there is a chance
10327                  * to get stuck in an infinite loop and hang eventually.
10328                  */
10329                 ret = drm_dp_mst_atomic_check(state);
10330                 if (ret)
10331                         goto fail;
10332                 status = dc_validate_global_state(dc, dm_state->context, false);
10333                 if (status != DC_OK) {
10334                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
10335                                        dc_status_to_str(status), status);
10336                         ret = -EINVAL;
10337                         goto fail;
10338                 }
10339         } else {
10340                 /*
10341                  * The commit is a fast update. Fast updates shouldn't change
10342                  * the DC context, affect global validation, and can have their
10343                  * commit work done in parallel with other commits not touching
10344                  * the same resource. If we have a new DC context as part of
10345                  * the DM atomic state from validation we need to free it and
10346                  * retain the existing one instead.
10347                  *
10348                  * Furthermore, since the DM atomic state only contains the DC
10349                  * context and can safely be annulled, we can free the state
10350                  * and clear the associated private object now to free
10351                  * some memory and avoid a possible use-after-free later.
10352                  */
10353
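                /*
                 * Swap-remove sketch: with num_private_objs = 3 and the DM
                 * object found at i = 0, the entry at j = 2 is copied into
                 * slot 0, slot 2 is cleared and the count drops to 2. Order
                 * is not preserved, which is fine for this unordered array.
                 */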
10354                 for (i = 0; i < state->num_private_objs; i++) {
10355                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10356
10357                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
                                int j = state->num_private_objs - 1;
10359
10360                                 dm_atomic_destroy_state(obj,
10361                                                 state->private_objs[i].state);
10362
10363                                 /* If i is not at the end of the array then the
10364                                  * last element needs to be moved to where i was
10365                                  * before the array can safely be truncated.
10366                                  */
10367                                 if (i != j)
10368                                         state->private_objs[i] =
10369                                                 state->private_objs[j];
10370
10371                                 state->private_objs[j].ptr = NULL;
10372                                 state->private_objs[j].state = NULL;
10373                                 state->private_objs[j].old_state = NULL;
10374                                 state->private_objs[j].new_state = NULL;
10375
10376                                 state->num_private_objs = j;
10377                                 break;
10378                         }
10379                 }
10380         }
10381
10382         /* Store the overall update type for use later in atomic check. */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10384                 struct dm_crtc_state *dm_new_crtc_state =
10385                         to_dm_crtc_state(new_crtc_state);
10386
10387                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10388                                                          UPDATE_TYPE_FULL :
10389                                                          UPDATE_TYPE_FAST;
10390         }
10391
10392         /* Must be success */
10393         WARN_ON(ret);
10394
10395         trace_amdgpu_dm_atomic_check_finish(state, ret);
10396
10397         return ret;
10398
10399 fail:
10400         if (ret == -EDEADLK)
10401                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10402         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10403                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10404         else
                DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10406
10407         trace_amdgpu_dm_atomic_check_finish(state, ret);
10408
10409         return ret;
10410 }
10411
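/*
 * Check whether the DP sink can be driven without strict MSA timing: read
 * DP_DOWN_STREAM_PORT_COUNT (DPCD address 0x0007) and test the
 * DP_MSA_TIMING_PAR_IGNORED bit. A sink that ignores the MSA timing
 * parameters tolerates the stretched vertical blanking that FreeSync uses.
 */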
10412 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10413                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
10414 {
10415         uint8_t dpcd_data;
10416         bool capable = false;
10417
10418         if (amdgpu_dm_connector->dc_link &&
10419                 dm_helpers_dp_read_dpcd(
10420                                 NULL,
10421                                 amdgpu_dm_connector->dc_link,
10422                                 DP_DOWN_STREAM_PORT_COUNT,
10423                                 &dpcd_data,
10424                                 sizeof(dpcd_data))) {
                capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10426         }
10427
10428         return capable;
10429 }
10430
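/*
 * Stream a CEA extension block to the DMCU firmware for parsing, 8 bytes at
 * a time: each chunk must be acknowledged via dc_edid_parser_recv_cea_ack()
 * before the next one is sent, and after the final chunk the firmware
 * reports back the AMD VSDB contents (version and min/max refresh rates)
 * if it found one.
 */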
10431 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10432                 uint8_t *edid_ext, int len,
10433                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10434 {
10435         int i;
10436         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10437         struct dc *dc = adev->dm.dc;
10438
10439         /* send extension block to DMCU for parsing */
10440         for (i = 0; i < len; i += 8) {
10441                 bool res;
10442                 int offset;
10443
                /* send 8 bytes at a time */
10445                 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10446                         return false;
10447
                if (i + 8 == len) {
                        /* EDID block sent completely, expect result */
10450                         int version, min_rate, max_rate;
10451
10452                         res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10453                         if (res) {
10454                                 /* amd vsdb found */
10455                                 vsdb_info->freesync_supported = 1;
10456                                 vsdb_info->amd_vsdb_version = version;
10457                                 vsdb_info->min_refresh_rate_hz = min_rate;
10458                                 vsdb_info->max_refresh_rate_hz = max_rate;
10459                                 return true;
10460                         }
10461                         /* not amd vsdb */
10462                         return false;
10463                 }
10464
                /* check for ack */
10466                 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10467                 if (!res)
10468                         return false;
10469         }
10470
10471         return false;
10472 }
10473
10474 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10475                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10476 {
10477         uint8_t *edid_ext = NULL;
10478         int i;
10479         bool valid_vsdb_found = false;
10480
10481         /*----- drm_find_cea_extension() -----*/
10482         /* No EDID or EDID extensions */
10483         if (edid == NULL || edid->extensions == 0)
10484                 return -ENODEV;
10485
10486         /* Find CEA extension */
10487         for (i = 0; i < edid->extensions; i++) {
10488                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10489                 if (edid_ext[0] == CEA_EXT)
10490                         break;
10491         }
10492
10493         if (i == edid->extensions)
10494                 return -ENODEV;
10495
10496         /*----- cea_db_offsets() -----*/
10497         if (edid_ext[0] != CEA_EXT)
10498                 return -ENODEV;
10499
10500         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10501
10502         return valid_vsdb_found ? i : -ENODEV;
10503 }
10504
10505 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10506                                         struct edid *edid)
10507 {
10508         int i = 0;
10509         struct detailed_timing *timing;
10510         struct detailed_non_pixel *data;
10511         struct detailed_data_monitor_range *range;
10512         struct amdgpu_dm_connector *amdgpu_dm_connector =
10513                         to_amdgpu_dm_connector(connector);
10514         struct dm_connector_state *dm_con_state = NULL;
10515
10516         struct drm_device *dev = connector->dev;
10517         struct amdgpu_device *adev = drm_to_adev(dev);
10518         bool freesync_capable = false;
10519         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10520
10521         if (!connector->state) {
10522                 DRM_ERROR("%s - Connector has no state\n", __func__);
10523                 goto update;
10524         }
10525
10526         if (!edid) {
10527                 dm_con_state = to_dm_connector_state(connector->state);
10528
10529                 amdgpu_dm_connector->min_vfreq = 0;
10530                 amdgpu_dm_connector->max_vfreq = 0;
10531                 amdgpu_dm_connector->pixel_clock_mhz = 0;
10532
10533                 goto update;
10534         }
10535
10536         dm_con_state = to_dm_connector_state(connector->state);
10537
10538         if (!amdgpu_dm_connector->dc_sink) {
10539                 DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
10540                 goto update;
10541         }
10542         if (!adev->dm.freesync_module)
10543                 goto update;
10544
10545         if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
10546             amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10548                 bool edid_check_required = false;
10549
10550                 if (edid) {
10551                         edid_check_required = is_dp_capable_without_timing_msa(
10552                                                 adev->dm.dc,
10553                                                 amdgpu_dm_connector);
10554                 }
10555
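                /*
                 * Example (hypothetical values): a range descriptor reporting
                 * 48-144 Hz yields min_vfreq = 48 and max_vfreq = 144 below;
                 * the span exceeds 10 Hz, so the sink is treated as
                 * FreeSync-capable.
                 */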
10556                 if (edid_check_required && (edid->version > 1 ||
10557                     (edid->version == 1 && edid->revision > 1))) {
10558                         for (i = 0; i < 4; i++) {
10560                                 timing  = &edid->detailed_timings[i];
10561                                 data    = &timing->data.other_data;
10562                                 range   = &data->data.range;
10563                                 /*
10564                                  * Check if monitor has continuous frequency mode
10565                                  */
10566                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10567                                         continue;
10568                                 /*
10569                                  * Check for range-limits-only flags. If flags == 1,
10570                                  * no additional timing information is provided.
10571                                  * Default GTF, GTF secondary curve and CVT are not
10572                                  * supported.
10573                                  */
10574                                 if (range->flags != 1)
10575                                         continue;
10576
10577                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10578                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10579                                 amdgpu_dm_connector->pixel_clock_mhz =
10580                                         range->pixel_clock_mhz * 10;
10581
10582                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10583                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10584
10585                                 break;
10586                         }
10587
10588                         if (amdgpu_dm_connector->max_vfreq -
10589                             amdgpu_dm_connector->min_vfreq > 10)
10590                                 freesync_capable = true;
10593                 }
10594         } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10595                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10596                 if (i >= 0 && vsdb_info.freesync_supported) {
10600                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10601                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10602                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10603                                 freesync_capable = true;
10604
10605                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10606                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10607                 }
10608         }
10609
10610 update:
10611         if (dm_con_state)
10612                 dm_con_state->freesync_capable = freesync_capable;
10613
10614         if (connector->vrr_capable_property)
10615                 drm_connector_set_vrr_capable_property(connector,
10616                                                        freesync_capable);
10617 }
10618
10619 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10620 {
10621         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10622
10623         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10624                 return;
10625         if (link->type == dc_connection_none)
10626                 return;
10627         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10628                                         dpcd_data, sizeof(dpcd_data))) {
10629                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10630
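                /*
                 * dpcd_data[0] holds the PSR version read from DPCD address
                 * DP_PSR_SUPPORT (0x070): zero means no PSR support, and any
                 * non-zero version is treated as PSR1 here.
                 */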
10631                 if (dpcd_data[0] == 0) {
10632                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10633                         link->psr_settings.psr_feature_enabled = false;
10634                 } else {
10635                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
10636                         link->psr_settings.psr_feature_enabled = true;
10637                 }
10638
10639                 DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
10640         }
10641 }
10642
10643 /*
10644  * amdgpu_dm_link_setup_psr() - configure the PSR link
10645  * @stream: stream state
10646  *
10647  * Return: true on success
10648  */
10649 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10650 {
10651         struct dc_link *link = NULL;
10652         struct psr_config psr_config = {0};
10653         struct psr_context psr_context = {0};
10654         bool ret = false;
10655
10656         if (stream == NULL)
10657                 return false;
10658
10659         link = stream->link;
10660
10661         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10662
10663         if (psr_config.psr_version > 0) {
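                /*
                 * Static setup values: require link training when exiting
                 * PSR, no frame-capture indication, a remote frame buffer
                 * setup time of 0x37, an SDP transmit deadline of 0x20
                 * lines, and SMU optimizations left disabled.
                 */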
10664                 psr_config.psr_exit_link_training_required = 0x1;
10665                 psr_config.psr_frame_capture_indication_req = 0;
10666                 psr_config.psr_rfb_setup_time = 0x37;
10667                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10668                 psr_config.allow_smu_optimizations = 0x0;
10669
10670                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10671         }
10672         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10674
10675         return ret;
10676 }
10677
10678 /*
10679  * amdgpu_dm_psr_enable() - enable psr f/w
10680  * @stream: stream state
10681  *
10682  * Return: true on success
10683  */
10684 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10685 {
10686         struct dc_link *link = stream->link;
10687         unsigned int vsync_rate_hz = 0;
10688         struct dc_static_screen_params params = {0};
10689         /*
10690          * Calculate the number of static frames before generating an
10691          * interrupt to enter PSR; start from a fail-safe default of
10692          * 2 static frames.
10693          */
10694         unsigned int num_frames_static = 2;
10694
10695         DRM_DEBUG_DRIVER("Enabling psr...\n");
10696
10697         vsync_rate_hz = div64_u64(div64_u64((
10698                         stream->timing.pix_clk_100hz * 100),
10699                         stream->timing.v_total),
10700                         stream->timing.h_total);
10701
10702         /*
10703          * Calculate the number of frames such that at least 30 ms of time
10704          * has passed, rounding up.
10705          */
10706         if (vsync_rate_hz != 0) {
10707                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10708                 num_frames_static = (30000 / frame_time_microsec) + 1;
10709         }
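        /*
         * Example (hypothetical timing): 1920x1080@60 with
         * pix_clk_100hz = 1485000, h_total = 2200 and v_total = 1125 gives
         * vsync_rate_hz = 60, frame_time_microsec = 16666 and
         * num_frames_static = 30000 / 16666 + 1 = 2.
         */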
10710
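        /* Cursor, overlay and surface updates all count as screen activity. */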
10711         params.triggers.cursor_update = true;
10712         params.triggers.overlay_update = true;
10713         params.triggers.surface_update = true;
10714         params.num_frames = num_frames_static;
10715
10716         dc_stream_set_static_screen_params(link->ctx->dc,
10717                                            &stream, 1,
10718                                            &params);
10719
10720         return dc_link_set_psr_allow_active(link, true, false, false);
10721 }
10722
10723 /*
10724  * amdgpu_dm_psr_disable() - disable psr f/w
10725  * @stream: stream state
10726  *
10727  * Return: true on success
10728  */
10729 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10730 {
10731         DRM_DEBUG_DRIVER("Disabling psr...\n");
10733
10734         return dc_link_set_psr_allow_active(stream->link, false, true, false);
10735 }
10736
10737 /*
10738  * amdgpu_dm_psr_disable_all() - disable psr f/w if psr is enabled on any
10739  * stream
10740  *
10741  * Return: true on success
10742  */
10743 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10744 {
10745         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10746         return dc_set_psr_allow_active(dm->dc, false);
10747 }
10748
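/*
 * Propagate the force_timing_sync module setting to every stream in the
 * current DC state and retrigger CRTC synchronization, all under dc_lock.
 */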
10749 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10750 {
10751         struct amdgpu_device *adev = drm_to_adev(dev);
10752         struct dc *dc = adev->dm.dc;
10753         int i;
10754
10755         mutex_lock(&adev->dm.dc_lock);
10756         if (dc->current_state) {
10757                 for (i = 0; i < dc->current_state->stream_count; ++i)
10758                         dc->current_state->streams[i]
10759                                 ->triggered_crtc_reset.enabled =
10760                                 adev->dm.force_timing_sync;
10761
10762                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10763                 dc_trigger_sync(dc, dc->current_state);
10764         }
10765         mutex_unlock(&adev->dm.dc_lock);
10766 }
10767
10768 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10769                        uint32_t value, const char *func_name)
10770 {
10771 #ifdef DM_CHECK_ADDR_0
10772         if (address == 0) {
10773                 DC_ERR("invalid register write; address = 0\n");
10774                 return;
10775         }
10776 #endif
10777         cgs_write_register(ctx->cgs_device, address, value);
10778         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10779 }
10780
10781 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10782                           const char *func_name)
10783 {
10784         uint32_t value;
10785 #ifdef DM_CHECK_ADDR_0
10786         if (address == 0) {
10787                 DC_ERR("invalid register read; address = 0\n");
10788                 return 0;
10789         }
10790 #endif
10791
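        /*
         * Reads are rejected while DMUB register-write offload is gathering
         * commands (burst writes excepted); the ASSERT flags such callers.
         */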
10792         if (ctx->dmub_srv &&
10793             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10794             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10795                 ASSERT(false);
10796                 return 0;
10797         }
10798
10799         value = cgs_read_register(ctx->cgs_device, address);
10800
10801         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10802
10803         return value;
10804 }
10805
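/*
 * Issue a DMUB AUX transfer asynchronously and block (up to 10 s) on the
 * dmub_aux_transfer_done completion, presenting a synchronous interface to
 * the caller. A sketch of a hypothetical caller, for illustration only:
 *
 *      enum aux_return_code_type rc;
 *      int len = amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link_index,
 *                                                         &payload, &rc);
 *      if (len < 0)
 *              handle_timeout(rc);  // rc == AUX_RET_ERROR_TIMEOUT
 */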
10806 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10807                                 struct aux_payload *payload, enum aux_return_code_type *operation_result)
10808 {
10809         struct amdgpu_device *adev = ctx->driver_context;
10810         int ret = 0;
10811
10812         dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10813         ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
10814         /* ret == 0 is a timeout; a negative value means we were interrupted */
10815         if (ret <= 0) {
10816                 *operation_result = AUX_RET_ERROR_TIMEOUT;
10817                 return -1;
10818         }
10818         *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10819
10820         if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10821                 (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10822
10823                 /* For the read case, copy the reply data into the payload */
10824                 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10825                     (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10826                         memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10827                                adev->dm.dmub_notify->aux_reply.length);
10828         }
10829
10830         return adev->dm.dmub_notify->aux_reply.length;
10831 }