drm/amd/display: Fix overlay validation by considering cursors
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "dc/inc/core_types.h"
33 #include "dal_asic_id.h"
34 #include "dmub/dmub_srv.h"
35 #include "dc/inc/hw/dmcu.h"
36 #include "dc/inc/hw/abm.h"
37 #include "dc/dc_dmub_srv.h"
38 #include "dc/dc_edid_parser.h"
39 #include "dc/dc_stat.h"
40 #include "amdgpu_dm_trace.h"
41
42 #include "vid.h"
43 #include "amdgpu.h"
44 #include "amdgpu_display.h"
45 #include "amdgpu_ucode.h"
46 #include "atom.h"
47 #include "amdgpu_dm.h"
48 #ifdef CONFIG_DRM_AMD_DC_HDCP
49 #include "amdgpu_dm_hdcp.h"
50 #include <drm/drm_hdcp.h>
51 #endif
52 #include "amdgpu_pm.h"
53
54 #include "amd_shared.h"
55 #include "amdgpu_dm_irq.h"
56 #include "dm_helpers.h"
57 #include "amdgpu_dm_mst_types.h"
58 #if defined(CONFIG_DEBUG_FS)
59 #include "amdgpu_dm_debugfs.h"
60 #endif
61
62 #include "ivsrcid/ivsrcid_vislands30.h"
63
64 #include "i2caux_interface.h"
65 #include <linux/module.h>
66 #include <linux/moduleparam.h>
67 #include <linux/types.h>
68 #include <linux/pm_runtime.h>
69 #include <linux/pci.h>
70 #include <linux/firmware.h>
71 #include <linux/component.h>
72
73 #include <drm/drm_atomic.h>
74 #include <drm/drm_atomic_uapi.h>
75 #include <drm/drm_atomic_helper.h>
76 #include <drm/drm_dp_mst_helper.h>
77 #include <drm/drm_fb_helper.h>
78 #include <drm/drm_fourcc.h>
79 #include <drm/drm_edid.h>
80 #include <drm/drm_vblank.h>
81 #include <drm/drm_audio_component.h>
82
83 #if defined(CONFIG_DRM_AMD_DC_DCN)
84 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
85
86 #include "dcn/dcn_1_0_offset.h"
87 #include "dcn/dcn_1_0_sh_mask.h"
88 #include "soc15_hw_ip.h"
89 #include "vega10_ip_offset.h"
90
91 #include "soc15_common.h"
92 #endif
93
94 #include "modules/inc/mod_freesync.h"
95 #include "modules/power/power_helpers.h"
96 #include "modules/inc/mod_info_packet.h"
97
98 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
100 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
102 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
104 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
106 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
108 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
110 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
112
113 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
114 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
115
116 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
117 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
118
119 /* Number of bytes in PSP header for firmware. */
120 #define PSP_HEADER_BYTES 0x100
121
122 /* Number of bytes in PSP footer for firmware. */
123 #define PSP_FOOTER_BYTES 0x100
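/*
 * Informational sketch (not from the firmware spec; inferred from the
 * offset arithmetic in dm_dmub_hw_init() below): within the DMUB firmware
 * image, the instruction-constant region is wrapped by PSP metadata and
 * the bss/data region follows it:
 *
 *   data + ucode_array_offset_bytes:
 *     [ PSP_HEADER_BYTES | inst_const payload | PSP_FOOTER_BYTES ]  (inst_const_bytes total)
 *     [ bss/data ]                                                  (bss_data_bytes)
 *
 * so the loadable instruction-constant size is
 * inst_const_bytes - PSP_HEADER_BYTES - PSP_FOOTER_BYTES.
 */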
124
125 /**
126  * DOC: overview
127  *
128  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
129  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
130  * requests into DC requests, and DC responses into DRM responses.
131  *
132  * The root control structure is &struct amdgpu_display_manager.
133  */
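/*
 * Hedged illustration of the liaison role described above: a userspace
 * atomic commit enters through the DRM core, is validated by
 * amdgpu_dm_atomic_check() and programmed to hardware by
 * amdgpu_dm_atomic_commit_tail() (both declared below), which translate
 * DRM plane/CRTC/connector state into DC surface/stream updates. This is
 * a call-chain summary only, not an exhaustive description of the paths
 * involved.
 */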
134
135 /* basic init/fini API */
136 static int amdgpu_dm_init(struct amdgpu_device *adev);
137 static void amdgpu_dm_fini(struct amdgpu_device *adev);
138 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
139
140 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
141 {
142         switch (link->dpcd_caps.dongle_type) {
143         case DISPLAY_DONGLE_NONE:
144                 return DRM_MODE_SUBCONNECTOR_Native;
145         case DISPLAY_DONGLE_DP_VGA_CONVERTER:
146                 return DRM_MODE_SUBCONNECTOR_VGA;
147         case DISPLAY_DONGLE_DP_DVI_CONVERTER:
148         case DISPLAY_DONGLE_DP_DVI_DONGLE:
149                 return DRM_MODE_SUBCONNECTOR_DVID;
150         case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
151         case DISPLAY_DONGLE_DP_HDMI_DONGLE:
152                 return DRM_MODE_SUBCONNECTOR_HDMIA;
153         case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
154         default:
155                 return DRM_MODE_SUBCONNECTOR_Unknown;
156         }
157 }
158
159 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
160 {
161         struct dc_link *link = aconnector->dc_link;
162         struct drm_connector *connector = &aconnector->base;
163         enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
164
165         if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
166                 return;
167
168         if (aconnector->dc_sink)
169                 subconnector = get_subconnector_type(link);
170
171         drm_object_property_set_value(&connector->base,
172                         connector->dev->mode_config.dp_subconnector_property,
173                         subconnector);
174 }
175
176 /*
177  * initializes drm_device display related structures, based on the information
178  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
179  * drm_encoder, drm_mode_config
180  *
181  * Returns 0 on success
182  */
183 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
184 /* removes and deallocates the drm structures, created by the above function */
185 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
186
187 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
188                                 struct drm_plane *plane,
189                                 unsigned long possible_crtcs,
190                                 const struct dc_plane_cap *plane_cap);
191 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
192                                struct drm_plane *plane,
193                                uint32_t link_index);
194 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
195                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
196                                     uint32_t link_index,
197                                     struct amdgpu_encoder *amdgpu_encoder);
198 static int amdgpu_dm_encoder_init(struct drm_device *dev,
199                                   struct amdgpu_encoder *aencoder,
200                                   uint32_t link_index);
201
202 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
203
204 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
205
206 static int amdgpu_dm_atomic_check(struct drm_device *dev,
207                                   struct drm_atomic_state *state);
208
209 static void handle_cursor_update(struct drm_plane *plane,
210                                  struct drm_plane_state *old_plane_state);
211
212 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
213 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
214 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
215 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
216 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
217
218 static const struct drm_format_info *
219 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
220
221 static bool
222 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
223                                  struct drm_crtc_state *new_crtc_state);
224 /*
225  * dm_vblank_get_counter
226  *
227  * @brief
228  * Get counter for number of vertical blanks
229  *
230  * @param
231  * struct amdgpu_device *adev - [in] desired amdgpu device
232  * int crtc - [in] which CRTC to get the counter from
233  *
234  * @return
235  * Counter for vertical blanks
236  */
237 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
238 {
239         if (crtc >= adev->mode_info.num_crtc)
240                 return 0;
241         else {
242                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
243
244                 if (acrtc->dm_irq_params.stream == NULL) {
245                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
246                                   crtc);
247                         return 0;
248                 }
249
250                 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
251         }
252 }
253
254 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
255                                   u32 *vbl, u32 *position)
256 {
257         uint32_t v_blank_start, v_blank_end, h_position, v_position;
258
259         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
260                 return -EINVAL;
261         else {
262                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
263
264                 if (acrtc->dm_irq_params.stream == NULL) {
265                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
266                                   crtc);
267                         return 0;
268                 }
269
270                 /*
271                  * TODO rework base driver to use values directly.
272                  * for now parse it back into reg-format
273                  */
274                 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
275                                          &v_blank_start,
276                                          &v_blank_end,
277                                          &h_position,
278                                          &v_position);
279
280                 *position = v_position | (h_position << 16);
281                 *vbl = v_blank_start | (v_blank_end << 16);
282         }
283
284         return 0;
285 }
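/*
 * Illustrative sketch only: how a consumer could unpack the reg-format
 * values produced by dm_crtc_get_scanoutpos() above. The field layout is
 * taken from the packing there; the helper itself is hypothetical and is
 * not called anywhere in this file.
 */
static inline void dm_unpack_scanoutpos(u32 position, u32 vbl,
                                        u32 *hpos, u32 *vpos,
                                        u32 *vbl_start, u32 *vbl_end)
{
        *vpos = position & 0xffff;      /* low 16 bits: vertical position */
        *hpos = position >> 16;         /* high 16 bits: horizontal position */
        *vbl_start = vbl & 0xffff;      /* low 16 bits: vblank start */
        *vbl_end = vbl >> 16;           /* high 16 bits: vblank end */
}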
286
287 static bool dm_is_idle(void *handle)
288 {
289         /* XXX todo */
290         return true;
291 }
292
293 static int dm_wait_for_idle(void *handle)
294 {
295         /* XXX todo */
296         return 0;
297 }
298
299 static bool dm_check_soft_reset(void *handle)
300 {
301         return false;
302 }
303
304 static int dm_soft_reset(void *handle)
305 {
306         /* XXX todo */
307         return 0;
308 }
309
310 static struct amdgpu_crtc *
311 get_crtc_by_otg_inst(struct amdgpu_device *adev,
312                      int otg_inst)
313 {
314         struct drm_device *dev = adev_to_drm(adev);
315         struct drm_crtc *crtc;
316         struct amdgpu_crtc *amdgpu_crtc;
317
318         if (otg_inst == -1) {
319                 WARN_ON(1);
320                 return adev->mode_info.crtcs[0];
321         }
322
323         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
324                 amdgpu_crtc = to_amdgpu_crtc(crtc);
325
326                 if (amdgpu_crtc->otg_inst == otg_inst)
327                         return amdgpu_crtc;
328         }
329
330         return NULL;
331 }
332
333 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
334 {
335         return acrtc->dm_irq_params.freesync_config.state ==
336                        VRR_STATE_ACTIVE_VARIABLE ||
337                acrtc->dm_irq_params.freesync_config.state ==
338                        VRR_STATE_ACTIVE_FIXED;
339 }
340
341 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
342 {
343         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
344                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
345 }
346
347 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
348                                               struct dm_crtc_state *new_state)
349 {
350         if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
351                 return true;
352         else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
353                 return true;
354         else
355                 return false;
356 }
357
358 /**
359  * dm_pflip_high_irq() - Handle pageflip interrupt
360  * @interrupt_params: interrupt parameters, used to look up the CRTC instance
361  *
362  * Handles the pageflip interrupt by notifying all interested parties
363  * that the pageflip has been completed.
364  */
365 static void dm_pflip_high_irq(void *interrupt_params)
366 {
367         struct amdgpu_crtc *amdgpu_crtc;
368         struct common_irq_params *irq_params = interrupt_params;
369         struct amdgpu_device *adev = irq_params->adev;
370         unsigned long flags;
371         struct drm_pending_vblank_event *e;
372         uint32_t vpos, hpos, v_blank_start, v_blank_end;
373         bool vrr_active;
374
375         amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
376
377         /* IRQ could occur when in initial stage */
378         /* TODO work and BO cleanup */
379         if (amdgpu_crtc == NULL) {
380                 DC_LOG_PFLIP("CRTC is null, returning.\n");
381                 return;
382         }
383
384         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
385
386         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
387                 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
388                                                  amdgpu_crtc->pflip_status,
389                                                  AMDGPU_FLIP_SUBMITTED,
390                                                  amdgpu_crtc->crtc_id,
391                                                  amdgpu_crtc);
392                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
393                 return;
394         }
395
396         /* page flip completed. */
397         e = amdgpu_crtc->event;
398         amdgpu_crtc->event = NULL;
399
400         if (!e)
401                 WARN_ON(1);
402
403         vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
404
405         /* Fixed refresh rate, or VRR scanout position outside front-porch? */
406         if (!vrr_active ||
407             !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
408                                       &v_blank_end, &hpos, &vpos) ||
409             (vpos < v_blank_start)) {
410                 /* Update to correct count and vblank timestamp if racing with
411                  * vblank irq. This also updates to the correct vblank timestamp
412                  * even in VRR mode, as scanout is past the front-porch atm.
413                  */
414                 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
415
416                 /* Wake up userspace by sending the pageflip event with proper
417                  * count and timestamp of vblank of flip completion.
418                  */
419                 if (e) {
420                         drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
421
422                         /* Event sent, so done with vblank for this flip */
423                         drm_crtc_vblank_put(&amdgpu_crtc->base);
424                 }
425         } else if (e) {
426                 /* VRR active and inside front-porch: vblank count and
427                  * timestamp for pageflip event will only be up to date after
428                  * drm_crtc_handle_vblank() has been executed from late vblank
429                  * irq handler after start of back-porch (vline 0). We queue the
430                  * pageflip event for send-out by drm_crtc_handle_vblank() with
431                  * updated timestamp and count, once it runs after us.
432                  *
433                  * We need to open-code this instead of using the helper
434                  * drm_crtc_arm_vblank_event(), as that helper would
435                  * call drm_crtc_accurate_vblank_count(), which we must
436                  * not call in VRR mode while we are in front-porch!
437                  */
438
439                 /* sequence will be replaced by real count during send-out. */
440                 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
441                 e->pipe = amdgpu_crtc->crtc_id;
442
443                 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
444                 e = NULL;
445         }
446
447         /* Keep track of vblank of this flip for flip throttling. We use the
448          * cooked hw counter, as that one incremented at start of this vblank
449          * of pageflip completion, so last_flip_vblank is the forbidden count
450          * for queueing new pageflips if vsync + VRR is enabled.
451          */
452         amdgpu_crtc->dm_irq_params.last_flip_vblank =
453                 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
454
455         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
456         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
457
458         DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
459                      amdgpu_crtc->crtc_id, amdgpu_crtc,
460                      vrr_active, (int) !e);
461 }
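/*
 * Hedged sketch of the consumer side of last_flip_vblank recorded above:
 * a flip-queueing path can compare the current cooked vblank count against
 * the stored value to throttle back-to-back flips under vsync + VRR, e.g.
 *
 *	if (amdgpu_get_vblank_counter_kms(&acrtc->base) ==
 *	    acrtc->dm_irq_params.last_flip_vblank)
 *		; /* defer programming the new flip to the next vblank */
 *
 * The call site is an assumption; only the counter helper and the field
 * are the ones used in dm_pflip_high_irq() above.
 */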
462
463 static void dm_vupdate_high_irq(void *interrupt_params)
464 {
465         struct common_irq_params *irq_params = interrupt_params;
466         struct amdgpu_device *adev = irq_params->adev;
467         struct amdgpu_crtc *acrtc;
468         struct drm_device *drm_dev;
469         struct drm_vblank_crtc *vblank;
470         ktime_t frame_duration_ns, previous_timestamp;
471         unsigned long flags;
472         int vrr_active;
473
474         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
475
476         if (acrtc) {
477                 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
478                 drm_dev = acrtc->base.dev;
479                 vblank = &drm_dev->vblank[acrtc->base.index];
480                 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
481                 frame_duration_ns = vblank->time - previous_timestamp;
482
483                 if (frame_duration_ns > 0) {
484                         trace_amdgpu_refresh_rate_track(acrtc->base.index,
485                                                 frame_duration_ns,
486                                                 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
487                         atomic64_set(&irq_params->previous_timestamp, vblank->time);
488                 }
489
490                 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
491                               acrtc->crtc_id,
492                               vrr_active);
493
494                 /* Core vblank handling is done here after end of front-porch in
495                  * vrr mode, as vblank timestamping will give valid results
496                  * while now done after front-porch. This will also deliver
497                  * page-flip completion events that have been queued to us
498                  * if a pageflip happened inside front-porch.
499                  */
500                 if (vrr_active) {
501                         drm_crtc_handle_vblank(&acrtc->base);
502
503                         /* BTR processing for pre-DCE12 ASICs */
504                         if (acrtc->dm_irq_params.stream &&
505                             adev->family < AMDGPU_FAMILY_AI) {
506                                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
507                                 mod_freesync_handle_v_update(
508                                     adev->dm.freesync_module,
509                                     acrtc->dm_irq_params.stream,
510                                     &acrtc->dm_irq_params.vrr_params);
511
512                                 dc_stream_adjust_vmin_vmax(
513                                     adev->dm.dc,
514                                     acrtc->dm_irq_params.stream,
515                                     &acrtc->dm_irq_params.vrr_params.adjust);
516                                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
517                         }
518                 }
519         }
520 }
521
522 /**
523  * dm_crtc_high_irq() - Handles CRTC interrupt
524  * @interrupt_params: used for determining the CRTC instance
525  *
526  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
527  * event handler.
528  */
529 static void dm_crtc_high_irq(void *interrupt_params)
530 {
531         struct common_irq_params *irq_params = interrupt_params;
532         struct amdgpu_device *adev = irq_params->adev;
533         struct amdgpu_crtc *acrtc;
534         unsigned long flags;
535         int vrr_active;
536
537         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
538         if (!acrtc)
539                 return;
540
541         vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
542
543         DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
544                       vrr_active, acrtc->dm_irq_params.active_planes);
545
546         /*
547          * Core vblank handling at start of front-porch is only possible
548          * in non-vrr mode, as only there vblank timestamping will give
549          * valid results while done in front-porch. Otherwise defer it
550          * to dm_vupdate_high_irq after end of front-porch.
551          */
552         if (!vrr_active)
553                 drm_crtc_handle_vblank(&acrtc->base);
554
555         /*
556          * Following stuff must happen at start of vblank, for crc
557          * computation and below-the-range btr support in vrr mode.
558          */
559         amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
560
561         /* BTR updates need to happen before VUPDATE on Vega and above. */
562         if (adev->family < AMDGPU_FAMILY_AI)
563                 return;
564
565         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
566
567         if (acrtc->dm_irq_params.stream &&
568             acrtc->dm_irq_params.vrr_params.supported &&
569             acrtc->dm_irq_params.freesync_config.state ==
570                     VRR_STATE_ACTIVE_VARIABLE) {
571                 mod_freesync_handle_v_update(adev->dm.freesync_module,
572                                              acrtc->dm_irq_params.stream,
573                                              &acrtc->dm_irq_params.vrr_params);
574
575                 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
576                                            &acrtc->dm_irq_params.vrr_params.adjust);
577         }
578
579         /*
580          * If there aren't any active_planes then DCH HUBP may be clock-gated.
581          * In that case, pageflip completion interrupts won't fire and pageflip
582          * completion events won't get delivered. Prevent this by sending
583          * pending pageflip events from here if a flip is still pending.
584          *
585          * If any planes are enabled, use dm_pflip_high_irq() instead, to
586          * avoid race conditions between flip programming and completion,
587          * which could cause too early flip completion events.
588          */
589         if (adev->family >= AMDGPU_FAMILY_RV &&
590             acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
591             acrtc->dm_irq_params.active_planes == 0) {
592                 if (acrtc->event) {
593                         drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
594                         acrtc->event = NULL;
595                         drm_crtc_vblank_put(&acrtc->base);
596                 }
597                 acrtc->pflip_status = AMDGPU_FLIP_NONE;
598         }
599
600         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
601 }
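/*
 * Timing sketch (informational, consolidated from the comments in the two
 * handlers above): per frame with VRR enabled,
 *
 *   active scanout
 *     -> VBLANK irq at start of front-porch (dm_crtc_high_irq: CRC, BTR)
 *     -> front-porch stretches under VRR
 *     -> VUPDATE irq after end of front-porch (dm_vupdate_high_irq:
 *        deferred drm_crtc_handle_vblank(), queued pageflip events)
 *
 * which is why core vblank handling defers to dm_vupdate_high_irq() in
 * VRR mode.
 */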
602
603 #if defined(CONFIG_DRM_AMD_DC_DCN)
604 /**
605  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
606  * DCN generation ASICs
607  * @interrupt_params: interrupt parameters
608  *
609  * Used to set crc window/read out crc value at vertical line 0 position
610  */
611 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
612 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
613 {
614         struct common_irq_params *irq_params = interrupt_params;
615         struct amdgpu_device *adev = irq_params->adev;
616         struct amdgpu_crtc *acrtc;
617
618         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
619
620         if (!acrtc)
621                 return;
622
623         amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
624 }
625 #endif
626
627 /**
628  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
629  * @interrupt_params: used for determining the Outbox instance
630  *
631  * Handles the Outbox interrupt by reading DMUB notifications and
632  * trace-buffer entries.
633  */
634 #define DMUB_TRACE_MAX_READ 64
635 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
636 {
637         struct dmub_notification notify;
638         struct common_irq_params *irq_params = interrupt_params;
639         struct amdgpu_device *adev = irq_params->adev;
640         struct amdgpu_display_manager *dm = &adev->dm;
641         struct dmcub_trace_buf_entry entry = { 0 };
642         uint32_t count = 0;
643
644         if (dc_enable_dmub_notifications(adev->dm.dc)) {
645                 if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
646                         do {
647                                 dc_stat_get_dmub_notification(adev->dm.dc, &notify);
648                         } while (notify.pending_notification);
649
650                         if (adev->dm.dmub_notify)
651                                 memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
652                         if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
653                                 complete(&adev->dm.dmub_aux_transfer_done);
654                         /* TODO: HPD implementation */
655
656                 } else {
657                         DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
658                 }
659         }
660
661
662         do {
663                 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
664                         trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
665                                                         entry.param0, entry.param1);
666
667                         DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
668                                  entry.trace_code, entry.tick_count, entry.param0, entry.param1);
669                 } else
670                         break;
671
672                 count++;
673
674         } while (count <= DMUB_TRACE_MAX_READ);
675
676         ASSERT(count <= DMUB_TRACE_MAX_READ);
677 }
678 #endif
679
680 static int dm_set_clockgating_state(void *handle,
681                   enum amd_clockgating_state state)
682 {
683         return 0;
684 }
685
686 static int dm_set_powergating_state(void *handle,
687                   enum amd_powergating_state state)
688 {
689         return 0;
690 }
691
692 /* Prototypes of private functions */
693 static int dm_early_init(void *handle);
694
695 /* Allocate memory for FBC compressed data  */
696 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
697 {
698         struct drm_device *dev = connector->dev;
699         struct amdgpu_device *adev = drm_to_adev(dev);
700         struct dm_compressor_info *compressor = &adev->dm.compressor;
701         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
702         struct drm_display_mode *mode;
703         unsigned long max_size = 0;
704
705         if (adev->dm.dc->fbc_compressor == NULL)
706                 return;
707
708         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
709                 return;
710
711         if (compressor->bo_ptr)
712                 return;
713
714
715         list_for_each_entry(mode, &connector->modes, head) {
716                 if (max_size < mode->htotal * mode->vtotal)
717                         max_size = mode->htotal * mode->vtotal;
718         }
719
720         if (max_size) {
721                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
722                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
723                             &compressor->gpu_addr, &compressor->cpu_addr);
724
725                 if (r) {
726                         DRM_ERROR("DM: Failed to initialize FBC\n");
727                 } else {
728                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
729                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
730                 }
731
732         }
733
734 }
735
736 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
737                                           int pipe, bool *enabled,
738                                           unsigned char *buf, int max_bytes)
739 {
740         struct drm_device *dev = dev_get_drvdata(kdev);
741         struct amdgpu_device *adev = drm_to_adev(dev);
742         struct drm_connector *connector;
743         struct drm_connector_list_iter conn_iter;
744         struct amdgpu_dm_connector *aconnector;
745         int ret = 0;
746
747         *enabled = false;
748
749         mutex_lock(&adev->dm.audio_lock);
750
751         drm_connector_list_iter_begin(dev, &conn_iter);
752         drm_for_each_connector_iter(connector, &conn_iter) {
753                 aconnector = to_amdgpu_dm_connector(connector);
754                 if (aconnector->audio_inst != port)
755                         continue;
756
757                 *enabled = true;
758                 ret = drm_eld_size(connector->eld);
759                 memcpy(buf, connector->eld, min(max_bytes, ret));
760
761                 break;
762         }
763         drm_connector_list_iter_end(&conn_iter);
764
765         mutex_unlock(&adev->dm.audio_lock);
766
767         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
768
769         return ret;
770 }
771
772 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
773         .get_eld = amdgpu_dm_audio_component_get_eld,
774 };
775
776 static int amdgpu_dm_audio_component_bind(struct device *kdev,
777                                        struct device *hda_kdev, void *data)
778 {
779         struct drm_device *dev = dev_get_drvdata(kdev);
780         struct amdgpu_device *adev = drm_to_adev(dev);
781         struct drm_audio_component *acomp = data;
782
783         acomp->ops = &amdgpu_dm_audio_component_ops;
784         acomp->dev = kdev;
785         adev->dm.audio_component = acomp;
786
787         return 0;
788 }
789
790 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
791                                           struct device *hda_kdev, void *data)
792 {
793         struct drm_device *dev = dev_get_drvdata(kdev);
794         struct amdgpu_device *adev = drm_to_adev(dev);
795         struct drm_audio_component *acomp = data;
796
797         acomp->ops = NULL;
798         acomp->dev = NULL;
799         adev->dm.audio_component = NULL;
800 }
801
802 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
803         .bind   = amdgpu_dm_audio_component_bind,
804         .unbind = amdgpu_dm_audio_component_unbind,
805 };
806
807 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
808 {
809         int i, ret;
810
811         if (!amdgpu_audio)
812                 return 0;
813
814         adev->mode_info.audio.enabled = true;
815
816         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
817
818         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
819                 adev->mode_info.audio.pin[i].channels = -1;
820                 adev->mode_info.audio.pin[i].rate = -1;
821                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
822                 adev->mode_info.audio.pin[i].status_bits = 0;
823                 adev->mode_info.audio.pin[i].category_code = 0;
824                 adev->mode_info.audio.pin[i].connected = false;
825                 adev->mode_info.audio.pin[i].id =
826                         adev->dm.dc->res_pool->audios[i]->inst;
827                 adev->mode_info.audio.pin[i].offset = 0;
828         }
829
830         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
831         if (ret < 0)
832                 return ret;
833
834         adev->dm.audio_registered = true;
835
836         return 0;
837 }
838
839 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
840 {
841         if (!amdgpu_audio)
842                 return;
843
844         if (!adev->mode_info.audio.enabled)
845                 return;
846
847         if (adev->dm.audio_registered) {
848                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
849                 adev->dm.audio_registered = false;
850         }
851
852         /* TODO: Disable audio? */
853
854         adev->mode_info.audio.enabled = false;
855 }
856
857 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
858 {
859         struct drm_audio_component *acomp = adev->dm.audio_component;
860
861         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
862                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
863
864                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
865                                                  pin, -1);
866         }
867 }
868
869 static int dm_dmub_hw_init(struct amdgpu_device *adev)
870 {
871         const struct dmcub_firmware_header_v1_0 *hdr;
872         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
873         struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
874         const struct firmware *dmub_fw = adev->dm.dmub_fw;
875         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
876         struct abm *abm = adev->dm.dc->res_pool->abm;
877         struct dmub_srv_hw_params hw_params;
878         enum dmub_status status;
879         const unsigned char *fw_inst_const, *fw_bss_data;
880         uint32_t i, fw_inst_const_size, fw_bss_data_size;
881         bool has_hw_support;
882
883         if (!dmub_srv)
884                 /* DMUB isn't supported on the ASIC. */
885                 return 0;
886
887         if (!fb_info) {
888                 DRM_ERROR("No framebuffer info for DMUB service.\n");
889                 return -EINVAL;
890         }
891
892         if (!dmub_fw) {
893                 /* Firmware required for DMUB support. */
894                 DRM_ERROR("No firmware provided for DMUB.\n");
895                 return -EINVAL;
896         }
897
898         status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
899         if (status != DMUB_STATUS_OK) {
900                 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
901                 return -EINVAL;
902         }
903
904         if (!has_hw_support) {
905                 DRM_INFO("DMUB unsupported on ASIC\n");
906                 return 0;
907         }
908
909         hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
910
911         fw_inst_const = dmub_fw->data +
912                         le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
913                         PSP_HEADER_BYTES;
914
915         fw_bss_data = dmub_fw->data +
916                       le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
917                       le32_to_cpu(hdr->inst_const_bytes);
918
919         /* Copy firmware and bios info into FB memory. */
920         fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
921                              PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
922
923         fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
924
925         /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
926          * amdgpu_ucode_init_single_fw will load dmub firmware
927          * fw_inst_const part to cw0; otherwise, the firmware back door load
928          * will be done by dm_dmub_hw_init
929          */
930         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
931                 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
932                                 fw_inst_const_size);
933         }
934
935         if (fw_bss_data_size)
936                 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
937                        fw_bss_data, fw_bss_data_size);
938
939         /* Copy firmware bios info into FB memory. */
940         memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
941                adev->bios_size);
942
943         /* Reset regions that need to be reset. */
944         memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
945                fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
946
947         memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
948                fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
949
950         memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
951                fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
952
953         /* Initialize hardware. */
954         memset(&hw_params, 0, sizeof(hw_params));
955         hw_params.fb_base = adev->gmc.fb_start;
956         hw_params.fb_offset = adev->gmc.aper_base;
957
958         /* backdoor load firmware and trigger dmub running */
959         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
960                 hw_params.load_inst_const = true;
961
962         if (dmcu)
963                 hw_params.psp_version = dmcu->psp_version;
964
965         for (i = 0; i < fb_info->num_fb; ++i)
966                 hw_params.fb[i] = &fb_info->fb[i];
967
968         status = dmub_srv_hw_init(dmub_srv, &hw_params);
969         if (status != DMUB_STATUS_OK) {
970                 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
971                 return -EINVAL;
972         }
973
974         /* Wait for firmware load to finish. */
975         status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
976         if (status != DMUB_STATUS_OK)
977                 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
978
979         /* Init DMCU and ABM if available. */
980         if (dmcu && abm) {
981                 dmcu->funcs->dmcu_init(dmcu);
982                 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
983         }
984
985         adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
986         if (!adev->dm.dc->ctx->dmub_srv) {
987                 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
988                 return -ENOMEM;
989         }
990
991         DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
992                  adev->dm.dmcub_fw_version);
993
994         return 0;
995 }
996
997 #if defined(CONFIG_DRM_AMD_DC_DCN)
998 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
999 {
1000         uint64_t pt_base;
1001         uint32_t logical_addr_low;
1002         uint32_t logical_addr_high;
1003         uint32_t agp_base, agp_bot, agp_top;
1004         PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1005
1006         logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1007         pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1008
1009         if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1010                 /*
1011                  * Raven2 has a HW issue that it is unable to use the vram which
1012                  * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1013                  * workaround that increase system aperture high address (add 1)
1014                  * to get rid of the VM fault and hardware hang.
1015                  */
1016                 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1017         else
1018                 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1019
1020         agp_base = 0;
1021         agp_bot = adev->gmc.agp_start >> 24;
1022         agp_top = adev->gmc.agp_end >> 24;
1023
1024
1025         page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1026         page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1027         page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1028         page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1029         page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1030         page_table_base.low_part = lower_32_bits(pt_base);
1031
1032         pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1033         pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1034
1035         pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1036         pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1037         pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1038
1039         pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1040         pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1041         pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1042
1043         pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1044         pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1045         pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1046
1047         pa_config->is_hvm_enabled = 0;
1048
1049 }
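/*
 * Worked example (informational, addresses assumed): with fb_start =
 * 0x0000008000000000, logical_addr_low = 0x8000000000 >> 18 = 0x200000,
 * and start_addr = (uint64_t)0x200000 << 18 recovers 0x8000000000 exactly,
 * because the system aperture registers hold 256 KiB-aligned addresses.
 * Likewise the AGP fields use 16 MiB units (>> 24 / << 24) and the GART
 * page table fields use 4 KiB units (>> 12 / << 12).
 */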
1050 #endif
1051 #if defined(CONFIG_DRM_AMD_DC_DCN)
1052 static void event_mall_stutter(struct work_struct *work)
1053 {
1054
1055         struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1056         struct amdgpu_display_manager *dm = vblank_work->dm;
1057
1058         mutex_lock(&dm->dc_lock);
1059
1060         if (vblank_work->enable)
1061                 dm->active_vblank_irq_count++;
1062         else if (dm->active_vblank_irq_count)
1063                 dm->active_vblank_irq_count--;
1064
1065         dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1066
1067         DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1068
1069         mutex_unlock(&dm->dc_lock);
1070 }
1071
1072 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1073 {
1074
1075         int max_caps = dc->caps.max_links;
1076         struct vblank_workqueue *vblank_work;
1077         int i = 0;
1078
1079         vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1080         if (ZERO_OR_NULL_PTR(vblank_work)) {
1081                 kfree(vblank_work);
1082                 return NULL;
1083         }
1084
1085         for (i = 0; i < max_caps; i++)
1086                 INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1087
1088         return vblank_work;
1089 }
1090 #endif
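/*
 * Hedged usage sketch for the workqueue above: a vblank enable/disable
 * path would fill in a per-link entry and schedule it, e.g.
 *
 *	dm->vblank_workqueue[idx].dm = dm;
 *	dm->vblank_workqueue[idx].enable = enable;
 *	schedule_work(&dm->vblank_workqueue[idx].mall_work);
 *
 * The indexing and call site are assumptions; only the fields touched in
 * event_mall_stutter() above are taken from this file.
 */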
1091 static int amdgpu_dm_init(struct amdgpu_device *adev)
1092 {
1093         struct dc_init_data init_data;
1094 #ifdef CONFIG_DRM_AMD_DC_HDCP
1095         struct dc_callback_init init_params;
1096 #endif
1097         int r;
1098
1099         adev->dm.ddev = adev_to_drm(adev);
1100         adev->dm.adev = adev;
1101
1102         /* Zero all the fields */
1103         memset(&init_data, 0, sizeof(init_data));
1104 #ifdef CONFIG_DRM_AMD_DC_HDCP
1105         memset(&init_params, 0, sizeof(init_params));
1106 #endif
1107
1108         mutex_init(&adev->dm.dc_lock);
1109         mutex_init(&adev->dm.audio_lock);
1110 #if defined(CONFIG_DRM_AMD_DC_DCN)
1111         spin_lock_init(&adev->dm.vblank_lock);
1112 #endif
1113
1114         if (amdgpu_dm_irq_init(adev)) {
1115                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1116                 goto error;
1117         }
1118
1119         init_data.asic_id.chip_family = adev->family;
1120
1121         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1122         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1123
1124         init_data.asic_id.vram_width = adev->gmc.vram_width;
1125         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1126         init_data.asic_id.atombios_base_address =
1127                 adev->mode_info.atom_context->bios;
1128
1129         init_data.driver = adev;
1130
1131         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1132
1133         if (!adev->dm.cgs_device) {
1134                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1135                 goto error;
1136         }
1137
1138         init_data.cgs_device = adev->dm.cgs_device;
1139
1140         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1141
1142         switch (adev->asic_type) {
1143         case CHIP_CARRIZO:
1144         case CHIP_STONEY:
1145         case CHIP_RAVEN:
1146         case CHIP_RENOIR:
1147                 init_data.flags.gpu_vm_support = true;
1148                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1149                         init_data.flags.disable_dmcu = true;
1150                 break;
1151 #if defined(CONFIG_DRM_AMD_DC_DCN)
1152         case CHIP_VANGOGH:
1153                 init_data.flags.gpu_vm_support = true;
1154                 break;
1155 #endif
1156         default:
1157                 break;
1158         }
1159
1160         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1161                 init_data.flags.fbc_support = true;
1162
1163         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1164                 init_data.flags.multi_mon_pp_mclk_switch = true;
1165
1166         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1167                 init_data.flags.disable_fractional_pwm = true;
1168
1169         init_data.flags.power_down_display_on_boot = true;
1170
1171         INIT_LIST_HEAD(&adev->dm.da_list);
1172         /* Display Core create. */
1173         adev->dm.dc = dc_create(&init_data);
1174
1175         if (adev->dm.dc) {
1176                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1177         } else {
1178                 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1179                 goto error;
1180         }
1181
1182         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1183                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1184                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1185         }
1186
1187         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1188                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1189
1190         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1191                 adev->dm.dc->debug.disable_stutter = true;
1192
1193         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1194                 adev->dm.dc->debug.disable_dsc = true;
1195
1196         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1197                 adev->dm.dc->debug.disable_clock_gate = true;
1198
1199         r = dm_dmub_hw_init(adev);
1200         if (r) {
1201                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1202                 goto error;
1203         }
1204
1205         dc_hardware_init(adev->dm.dc);
1206
1207 #if defined(CONFIG_DRM_AMD_DC_DCN)
1208         if (adev->apu_flags) {
1209                 struct dc_phy_addr_space_config pa_config;
1210
1211                 mmhub_read_system_context(adev, &pa_config);
1212
1213                 // Call the DC init_memory func
1214                 dc_setup_system_context(adev->dm.dc, &pa_config);
1215         }
1216 #endif
1217
1218         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1219         if (!adev->dm.freesync_module) {
1220                 DRM_ERROR(
1221                         "amdgpu: failed to initialize freesync_module.\n");
1222         } else
1223                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1224                                 adev->dm.freesync_module);
1225
1226         amdgpu_dm_init_color_mod();
1227
1228 #if defined(CONFIG_DRM_AMD_DC_DCN)
1229         if (adev->dm.dc->caps.max_links > 0) {
1230                 adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1231
1232                 if (!adev->dm.vblank_workqueue)
1233                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1234                 else
1235                         DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1236         }
1237 #endif
1238
1239 #ifdef CONFIG_DRM_AMD_DC_HDCP
1240         if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1241                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1242
1243                 if (!adev->dm.hdcp_workqueue)
1244                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1245                 else
1246                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1247
1248                 dc_init_callbacks(adev->dm.dc, &init_params);
1249         }
1250 #endif
1251 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1252         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1253 #endif
1254         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1255                 init_completion(&adev->dm.dmub_aux_transfer_done);
1256                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1257                 if (!adev->dm.dmub_notify) {
1258                         DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1259                         goto error;
1260                 }
1261                 amdgpu_dm_outbox_init(adev);
1262         }
1263
1264         if (amdgpu_dm_initialize_drm_device(adev)) {
1265                 DRM_ERROR(
1266                         "amdgpu: failed to initialize sw for display support.\n");
1267                 goto error;
1268         }
1269
1270         /* create fake encoders for MST */
1271         dm_dp_create_fake_mst_encoders(adev);
1272
1273         /* TODO: Add_display_info? */
1274
1275         /* TODO use dynamic cursor width */
1276         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1277         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1278
1279         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1280                 DRM_ERROR(
1281                         "amdgpu: failed to initialize vblank support.\n");
1282                 goto error;
1283         }
1284
1285
1286         DRM_DEBUG_DRIVER("KMS initialized.\n");
1287
1288         return 0;
1289 error:
1290         amdgpu_dm_fini(adev);
1291
1292         return -EINVAL;
1293 }
1294
1295 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1296 {
1297         int i;
1298
1299         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1300                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1301         }
1302
1303         amdgpu_dm_audio_fini(adev);
1304
1305         amdgpu_dm_destroy_drm_device(&adev->dm);
1306
1307 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1308         if (adev->dm.crc_rd_wrk) {
1309                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1310                 kfree(adev->dm.crc_rd_wrk);
1311                 adev->dm.crc_rd_wrk = NULL;
1312         }
1313 #endif
1314 #ifdef CONFIG_DRM_AMD_DC_HDCP
1315         if (adev->dm.hdcp_workqueue) {
1316                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1317                 adev->dm.hdcp_workqueue = NULL;
1318         }
1319
1320         if (adev->dm.dc)
1321                 dc_deinit_callbacks(adev->dm.dc);
1322 #endif
1323
1324 #if defined(CONFIG_DRM_AMD_DC_DCN)
1325         if (adev->dm.vblank_workqueue) {
1326                 adev->dm.vblank_workqueue->dm = NULL;
1327                 kfree(adev->dm.vblank_workqueue);
1328                 adev->dm.vblank_workqueue = NULL;
1329         }
1330 #endif
1331
1332         if (adev->dm.dc->ctx->dmub_srv) {
1333                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1334                 adev->dm.dc->ctx->dmub_srv = NULL;
1335         }
1336
1337         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1338                 kfree(adev->dm.dmub_notify);
1339                 adev->dm.dmub_notify = NULL;
1340         }
1341
1342         if (adev->dm.dmub_bo)
1343                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1344                                       &adev->dm.dmub_bo_gpu_addr,
1345                                       &adev->dm.dmub_bo_cpu_addr);
1346
1347         /* DC Destroy TODO: Replace destroy DAL */
1348         if (adev->dm.dc)
1349                 dc_destroy(&adev->dm.dc);
1350         /*
1351          * TODO: pageflip, vblank interrupt
1352          *
1353          * amdgpu_dm_irq_fini(adev);
1354          */
1355
1356         if (adev->dm.cgs_device) {
1357                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1358                 adev->dm.cgs_device = NULL;
1359         }
1360         if (adev->dm.freesync_module) {
1361                 mod_freesync_destroy(adev->dm.freesync_module);
1362                 adev->dm.freesync_module = NULL;
1363         }
1364
1365         mutex_destroy(&adev->dm.audio_lock);
1366         mutex_destroy(&adev->dm.dc_lock);
1367
1368         return;
1369 }
1370
1371 static int load_dmcu_fw(struct amdgpu_device *adev)
1372 {
1373         const char *fw_name_dmcu = NULL;
1374         int r;
1375         const struct dmcu_firmware_header_v1_0 *hdr;
1376
1377         switch (adev->asic_type) {
1378 #if defined(CONFIG_DRM_AMD_DC_SI)
1379         case CHIP_TAHITI:
1380         case CHIP_PITCAIRN:
1381         case CHIP_VERDE:
1382         case CHIP_OLAND:
1383 #endif
1384         case CHIP_BONAIRE:
1385         case CHIP_HAWAII:
1386         case CHIP_KAVERI:
1387         case CHIP_KABINI:
1388         case CHIP_MULLINS:
1389         case CHIP_TONGA:
1390         case CHIP_FIJI:
1391         case CHIP_CARRIZO:
1392         case CHIP_STONEY:
1393         case CHIP_POLARIS11:
1394         case CHIP_POLARIS10:
1395         case CHIP_POLARIS12:
1396         case CHIP_VEGAM:
1397         case CHIP_VEGA10:
1398         case CHIP_VEGA12:
1399         case CHIP_VEGA20:
1400         case CHIP_NAVI10:
1401         case CHIP_NAVI14:
1402         case CHIP_RENOIR:
1403         case CHIP_SIENNA_CICHLID:
1404         case CHIP_NAVY_FLOUNDER:
1405         case CHIP_DIMGREY_CAVEFISH:
1406         case CHIP_BEIGE_GOBY:
1407         case CHIP_VANGOGH:
1408                 return 0;
1409         case CHIP_NAVI12:
1410                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1411                 break;
1412         case CHIP_RAVEN:
1413                 if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
1414                     ASICREV_IS_RAVEN2(adev->external_rev_id))
1415                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1416                 else
1418                         return 0;
1419                 break;
1420         default:
1421                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1422                 return -EINVAL;
1423         }
1424
1425         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1426                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1427                 return 0;
1428         }
1429
1430         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1431         if (r == -ENOENT) {
1432                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1433                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1434                 adev->dm.fw_dmcu = NULL;
1435                 return 0;
1436         }
1437         if (r) {
1438                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1439                         fw_name_dmcu);
1440                 return r;
1441         }
1442
1443         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1444         if (r) {
1445                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1446                         fw_name_dmcu);
1447                 release_firmware(adev->dm.fw_dmcu);
1448                 adev->dm.fw_dmcu = NULL;
1449                 return r;
1450         }
1451
1452         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
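        /*
         * The DMCU image packs ERAM program code followed by an interrupt
         * vector table; register both regions with the PSP loader and
         * account for each, page-aligned, in the total firmware size.
         */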
1453         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1454         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1455         adev->firmware.fw_size +=
1456                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1457
1458         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1459         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1460         adev->firmware.fw_size +=
1461                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1462
1463         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1464
1465         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1466
1467         return 0;
1468 }
1469
1470 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1471 {
1472         struct amdgpu_device *adev = ctx;
1473
1474         return dm_read_reg(adev->dm.dc->ctx, address);
1475 }
1476
1477 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1478                                      uint32_t value)
1479 {
1480         struct amdgpu_device *adev = ctx;
1481
1482         return dm_write_reg(adev->dm.dc->ctx, address, value);
1483 }
1484
1485 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1486 {
1487         struct dmub_srv_create_params create_params;
1488         struct dmub_srv_region_params region_params;
1489         struct dmub_srv_region_info region_info;
1490         struct dmub_srv_fb_params fb_params;
1491         struct dmub_srv_fb_info *fb_info;
1492         struct dmub_srv *dmub_srv;
1493         const struct dmcub_firmware_header_v1_0 *hdr;
1494         const char *fw_name_dmub;
1495         enum dmub_asic dmub_asic;
1496         enum dmub_status status;
1497         int r;
1498
1499         switch (adev->asic_type) {
1500         case CHIP_RENOIR:
1501                 dmub_asic = DMUB_ASIC_DCN21;
1502                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1503                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1504                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1505                 break;
1506         case CHIP_SIENNA_CICHLID:
1507                 dmub_asic = DMUB_ASIC_DCN30;
1508                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1509                 break;
1510         case CHIP_NAVY_FLOUNDER:
1511                 dmub_asic = DMUB_ASIC_DCN30;
1512                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1513                 break;
1514         case CHIP_VANGOGH:
1515                 dmub_asic = DMUB_ASIC_DCN301;
1516                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1517                 break;
1518         case CHIP_DIMGREY_CAVEFISH:
1519                 dmub_asic = DMUB_ASIC_DCN302;
1520                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1521                 break;
1522         case CHIP_BEIGE_GOBY:
1523                 dmub_asic = DMUB_ASIC_DCN303;
1524                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1525                 break;
1527         default:
1528                 /* ASIC doesn't support DMUB. */
1529                 return 0;
1530         }
1531
1532         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1533         if (r) {
1534                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1535                 return 0;
1536         }
1537
1538         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1539         if (r) {
1540                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1541                 return 0;
1542         }
1543
1544         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1545         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

1546         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1547                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1548                         AMDGPU_UCODE_ID_DMCUB;
1549                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1550                         adev->dm.dmub_fw;
1551                 adev->firmware.fw_size +=
1552                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1553
1554                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1555                          adev->dm.dmcub_fw_version);
1556         }
1557
1560         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1561         dmub_srv = adev->dm.dmub_srv;
1562
1563         if (!dmub_srv) {
1564                 DRM_ERROR("Failed to allocate DMUB service!\n");
1565                 return -ENOMEM;
1566         }
1567
1568         memset(&create_params, 0, sizeof(create_params));
1569         create_params.user_ctx = adev;
1570         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1571         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1572         create_params.asic = dmub_asic;
1573
1574         /* Create the DMUB service. */
1575         status = dmub_srv_create(dmub_srv, &create_params);
1576         if (status != DMUB_STATUS_OK) {
1577                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1578                 return -EINVAL;
1579         }
1580
1581         /* Calculate the size of all the regions for the DMUB service. */
1582         memset(&region_params, 0, sizeof(region_params));
1583
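        /*
         * The PSP signs the instruction region with a header and footer:
         * both are excluded from the executable region size below, while
         * fw_inst_const skips only the header to point at the actual
         * instruction data inside the firmware blob.
         */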
1584         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1585                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1586         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1587         region_params.vbios_size = adev->bios_size;
1588         region_params.fw_bss_data = region_params.bss_data_size ?
1589                 adev->dm.dmub_fw->data +
1590                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1591                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1592         region_params.fw_inst_const =
1593                 adev->dm.dmub_fw->data +
1594                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1595                 PSP_HEADER_BYTES;
1596
1597         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1598                                            &region_info);
1599
1600         if (status != DMUB_STATUS_OK) {
1601                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1602                 return -EINVAL;
1603         }
1604
1605         /*
1606          * Allocate a framebuffer based on the total size of all the regions.
1607          * TODO: Move this into GART.
1608          */
1609         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1610                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1611                                     &adev->dm.dmub_bo_gpu_addr,
1612                                     &adev->dm.dmub_bo_cpu_addr);
1613         if (r)
1614                 return r;
1615
1616         /* Rebase the regions on the framebuffer address. */
1617         memset(&fb_params, 0, sizeof(fb_params));
1618         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1619         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1620         fb_params.region_info = &region_info;
1621
1622         adev->dm.dmub_fb_info =
1623                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1624         fb_info = adev->dm.dmub_fb_info;
1625
1626         if (!fb_info) {
1627                 DRM_ERROR(
1628                         "Failed to allocate framebuffer info for DMUB service!\n");
1629                 return -ENOMEM;
1630         }
1631
1632         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1633         if (status != DMUB_STATUS_OK) {
1634                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1635                 return -EINVAL;
1636         }
1637
1638         return 0;
1639 }
1640
1641 static int dm_sw_init(void *handle)
1642 {
1643         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1644         int r;
1645
1646         r = dm_dmub_sw_init(adev);
1647         if (r)
1648                 return r;
1649
1650         return load_dmcu_fw(adev);
1651 }
1652
1653 static int dm_sw_fini(void *handle)
1654 {
1655         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1656
1657         kfree(adev->dm.dmub_fb_info);
1658         adev->dm.dmub_fb_info = NULL;
1659
1660         if (adev->dm.dmub_srv) {
1661                 dmub_srv_destroy(adev->dm.dmub_srv);
1662                 adev->dm.dmub_srv = NULL;
1663         }
1664
1665         release_firmware(adev->dm.dmub_fw);
1666         adev->dm.dmub_fw = NULL;
1667
1668         release_firmware(adev->dm.fw_dmcu);
1669         adev->dm.fw_dmcu = NULL;
1670
1671         return 0;
1672 }
1673
1674 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1675 {
1676         struct amdgpu_dm_connector *aconnector;
1677         struct drm_connector *connector;
1678         struct drm_connector_list_iter iter;
1679         int ret = 0;
1680
1681         drm_connector_list_iter_begin(dev, &iter);
1682         drm_for_each_connector_iter(connector, &iter) {
1683                 aconnector = to_amdgpu_dm_connector(connector);
1684                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1685                     aconnector->mst_mgr.aux) {
1686                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1687                                          aconnector,
1688                                          aconnector->base.base.id);
1689
1690                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1691                         if (ret < 0) {
1692                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1693                                 aconnector->dc_link->type =
1694                                         dc_connection_single;
1695                                 break;
1696                         }
1697                 }
1698         }
1699         drm_connector_list_iter_end(&iter);
1700
1701         return ret;
1702 }
1703
1704 static int dm_late_init(void *handle)
1705 {
1706         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1707
1708         struct dmcu_iram_parameters params;
1709         unsigned int linear_lut[16];
1710         int i;
1711         struct dmcu *dmcu = NULL;
1712         bool ret = true;
1713
1714         dmcu = adev->dm.dc->res_pool->dmcu;
1715
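        /* Build a 16-point linear LUT spanning 0 to 0xFFFF */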
1716         for (i = 0; i < 16; i++)
1717                 linear_lut[i] = 0xFFFF * i / 15;
1718
1719         params.set = 0;
1720         params.backlight_ramping_start = 0xCCCC;
1721         params.backlight_ramping_reduction = 0xCCCCCCCC;
1722         params.backlight_lut_array_size = 16;
1723         params.backlight_lut_array = linear_lut;
1724
1725         /* Minimum backlight level after ABM reduction; don't allow below 1%:
1726          * 0xFFFF * 0.01 = 0x28F
1727          */
1728         params.min_abm_backlight = 0x28F;
1729
1730         /* In the case where ABM is implemented on the DMCUB,
1731          * the dmcu object will be NULL.
1732          * ABM 2.4 and up are implemented on the DMCUB.
1733          */
1734         if (dmcu)
1735                 ret = dmcu_load_iram(dmcu, params);
1736         else if (adev->dm.dc->ctx->dmub_srv)
1737                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1738
1739         if (!ret)
1740                 return -EINVAL;
1741
1742         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1743 }
1744
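/*
 * Suspend or resume the DP MST topology managers of all root MST connectors.
 * If a manager fails to resume, MST is torn down on that link and a hotplug
 * event is sent so userspace can re-probe the topology.
 */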
1745 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1746 {
1747         struct amdgpu_dm_connector *aconnector;
1748         struct drm_connector *connector;
1749         struct drm_connector_list_iter iter;
1750         struct drm_dp_mst_topology_mgr *mgr;
1751         int ret;
1752         bool need_hotplug = false;
1753
1754         drm_connector_list_iter_begin(dev, &iter);
1755         drm_for_each_connector_iter(connector, &iter) {
1756                 aconnector = to_amdgpu_dm_connector(connector);
1757                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1758                     aconnector->mst_port)
1759                         continue;
1760
1761                 mgr = &aconnector->mst_mgr;
1762
1763                 if (suspend) {
1764                         drm_dp_mst_topology_mgr_suspend(mgr);
1765                 } else {
1766                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1767                         if (ret < 0) {
1768                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1769                                 need_hotplug = true;
1770                         }
1771                 }
1772         }
1773         drm_connector_list_iter_end(&iter);
1774
1775         if (need_hotplug)
1776                 drm_kms_helper_hotplug_event(dev);
1777 }
1778
1779 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1780 {
1781         struct smu_context *smu = &adev->smu;
1782         int ret = 0;
1783
1784         if (!is_support_sw_smu(adev))
1785                 return 0;
1786
1787         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1788          * depends on the Windows driver's dc implementation.
1789          * For Navi1x, clock settings of dcn watermarks are fixed; the settings
1790          * should be passed to smu during boot up and resume from s3.
1791          * Boot up: dc calculates dcn watermark clock settings within dc_create,
1792          * dcn20_resource_construct,
1793          * then calls the pplib functions below to pass the settings to smu:
1794          * smu_set_watermarks_for_clock_ranges
1795          * smu_set_watermarks_table
1796          * navi10_set_watermarks_table
1797          * smu_write_watermarks_table
1798          *
1799          * For Renoir, clock settings of dcn watermarks are also fixed values.
1800          * dc has implemented a different flow for the Windows driver:
1801          * dc_hardware_init / dc_set_power_state
1802          * dcn10_init_hw
1803          * notify_wm_ranges
1804          * set_wm_ranges
1805          * -- Linux
1806          * smu_set_watermarks_for_clock_ranges
1807          * renoir_set_watermarks_table
1808          * smu_write_watermarks_table
1809          *
1810          * For Linux,
1811          * dc_hardware_init -> amdgpu_dm_init
1812          * dc_set_power_state --> dm_resume
1813          *
1814          * Therefore, this function applies to navi10/12/14 but not Renoir.
1815          */
1817         switch (adev->asic_type) {
1818         case CHIP_NAVI10:
1819         case CHIP_NAVI14:
1820         case CHIP_NAVI12:
1821                 break;
1822         default:
1823                 return 0;
1824         }
1825
1826         ret = smu_write_watermarks_table(smu);
1827         if (ret) {
1828                 DRM_ERROR("Failed to update WMTABLE!\n");
1829                 return ret;
1830         }
1831
1832         return 0;
1833 }
1834
1835 /**
1836  * dm_hw_init() - Initialize DC device
1837  * @handle: The base driver device containing the amdgpu_dm device.
1838  *
1839  * Initialize the &struct amdgpu_display_manager device. This involves calling
1840  * the initializers of each DM component, then populating the struct with them.
1841  *
1842  * Although the function implies hardware initialization, both hardware and
1843  * software are initialized here. Splitting them out to their relevant init
1844  * hooks is a future TODO item.
1845  *
1846  * Some notable things that are initialized here:
1847  *
1848  * - Display Core, both software and hardware
1849  * - DC modules that we need (freesync and color management)
1850  * - DRM software states
1851  * - Interrupt sources and handlers
1852  * - Vblank support
1853  * - Debug FS entries, if enabled
1854  */
1855 static int dm_hw_init(void *handle)
1856 {
1857         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1858         /* Create DAL display manager */
1859         amdgpu_dm_init(adev);
1860         amdgpu_dm_hpd_init(adev);
1861
1862         return 0;
1863 }
1864
1865 /**
1866  * dm_hw_fini() - Teardown DC device
1867  * @handle: The base driver device containing the amdgpu_dm device.
1868  *
1869  * Teardown components within &struct amdgpu_display_manager that require
1870  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1871  * were loaded. Also flush IRQ workqueues and disable them.
1872  */
1873 static int dm_hw_fini(void *handle)
1874 {
1875         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1876
1877         amdgpu_dm_hpd_fini(adev);
1878
1879         amdgpu_dm_irq_fini(adev);
1880         amdgpu_dm_fini(adev);
1881         return 0;
1882 }
1883
1885 static int dm_enable_vblank(struct drm_crtc *crtc);
1886 static void dm_disable_vblank(struct drm_crtc *crtc);
1887
1888 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1889                                  struct dc_state *state, bool enable)
1890 {
1891         enum dc_irq_source irq_source;
1892         struct amdgpu_crtc *acrtc;
1893         int rc = -EBUSY;
1894         int i = 0;
1895
1896         for (i = 0; i < state->stream_count; i++) {
1897                 acrtc = get_crtc_by_otg_inst(
1898                                 adev, state->stream_status[i].primary_otg_inst);
1899
1900                 if (acrtc && state->stream_status[i].plane_count != 0) {
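                        /* Pflip IRQ sources are laid out per OTG instance */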
1901                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1902                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1903                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1904                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
1905                         if (rc)
1906                                 DRM_WARN("Failed to %s pflip interrupts\n",
1907                                          enable ? "enable" : "disable");
1908
1909                         if (enable) {
1910                                 rc = dm_enable_vblank(&acrtc->base);
1911                                 if (rc)
1912                                         DRM_WARN("Failed to enable vblank interrupts\n");
1913                         } else {
1914                                 dm_disable_vblank(&acrtc->base);
1915                         }
1916
1917                 }
1918         }
1919
1920 }
1921
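/*
 * Build a copy of the current DC state with every plane detached and every
 * stream removed, then validate and commit that empty context to quiesce
 * the display hardware; used by the GPU reset path in dm_suspend().
 */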
1922 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1923 {
1924         struct dc_state *context = NULL;
1925         enum dc_status res = DC_ERROR_UNEXPECTED;
1926         int i;
1927         struct dc_stream_state *del_streams[MAX_PIPES];
1928         int del_streams_count = 0;
1929
1930         memset(del_streams, 0, sizeof(del_streams));
1931
1932         context = dc_create_state(dc);
1933         if (context == NULL)
1934                 goto context_alloc_fail;
1935
1936         dc_resource_state_copy_construct_current(dc, context);
1937
1938         /* First remove from context all streams */
1939         for (i = 0; i < context->stream_count; i++) {
1940                 struct dc_stream_state *stream = context->streams[i];
1941
1942                 del_streams[del_streams_count++] = stream;
1943         }
1944
1945         /* Remove all planes for removed streams and then remove the streams */
1946         for (i = 0; i < del_streams_count; i++) {
1947                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1948                         res = DC_FAIL_DETACH_SURFACES;
1949                         goto fail;
1950                 }
1951
1952                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1953                 if (res != DC_OK)
1954                         goto fail;
1955         }
1956
1958         res = dc_validate_global_state(dc, context, false);
1959
1960         if (res != DC_OK) {
1961                 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1962                 goto fail;
1963         }
1964
1965         res = dc_commit_state(dc, context);
1966
1967 fail:
1968         dc_release_state(context);
1969
1970 context_alloc_fail:
1971         return res;
1972 }
1973
1974 static int dm_suspend(void *handle)
1975 {
1976         struct amdgpu_device *adev = handle;
1977         struct amdgpu_display_manager *dm = &adev->dm;
1978         int ret = 0;
1979
1980         if (amdgpu_in_reset(adev)) {
1981                 mutex_lock(&dm->dc_lock);
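                /* dc_lock stays held across the reset; dm_resume() releases it */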
1982
1983 #if defined(CONFIG_DRM_AMD_DC_DCN)
1984                 dc_allow_idle_optimizations(adev->dm.dc, false);
1985 #endif
1986
1987                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1988
1989                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1990
1991                 amdgpu_dm_commit_zero_streams(dm->dc);
1992
1993                 amdgpu_dm_irq_suspend(adev);
1994
1995                 return ret;
1996         }
1997
1998         WARN_ON(adev->dm.cached_state);
1999         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2000
2001         s3_handle_mst(adev_to_drm(adev), true);
2002
2003         amdgpu_dm_irq_suspend(adev);
2004
2006         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2007
2008         return 0;
2009 }
2010
2011 static struct amdgpu_dm_connector *
2012 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2013                                              struct drm_crtc *crtc)
2014 {
2015         uint32_t i;
2016         struct drm_connector_state *new_con_state;
2017         struct drm_connector *connector;
2018         struct drm_crtc *crtc_from_state;
2019
2020         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2021                 crtc_from_state = new_con_state->crtc;
2022
2023                 if (crtc_from_state == crtc)
2024                         return to_amdgpu_dm_connector(connector);
2025         }
2026
2027         return NULL;
2028 }
2029
2030 static void emulated_link_detect(struct dc_link *link)
2031 {
2032         struct dc_sink_init_data sink_init_data = { 0 };
2033         struct display_sink_capability sink_caps = { 0 };
2034         enum dc_edid_status edid_status;
2035         struct dc_context *dc_ctx = link->ctx;
2036         struct dc_sink *sink = NULL;
2037         struct dc_sink *prev_sink = NULL;
2038
2039         link->type = dc_connection_none;
2040         prev_sink = link->local_sink;
2041
2042         if (prev_sink)
2043                 dc_sink_release(prev_sink);
2044
2045         switch (link->connector_signal) {
2046         case SIGNAL_TYPE_HDMI_TYPE_A: {
2047                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2048                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2049                 break;
2050         }
2051
2052         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2053                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2054                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2055                 break;
2056         }
2057
2058         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2059                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2060                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2061                 break;
2062         }
2063
2064         case SIGNAL_TYPE_LVDS: {
2065                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2066                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2067                 break;
2068         }
2069
2070         case SIGNAL_TYPE_EDP: {
2071                 sink_caps.transaction_type =
2072                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2073                 sink_caps.signal = SIGNAL_TYPE_EDP;
2074                 break;
2075         }
2076
2077         case SIGNAL_TYPE_DISPLAY_PORT: {
2078                 sink_caps.transaction_type =
2079                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2080                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2081                 break;
2082         }
2083
2084         default:
2085                 DC_ERROR("Invalid connector type! signal:%d\n",
2086                         link->connector_signal);
2087                 return;
2088         }
2089
2090         sink_init_data.link = link;
2091         sink_init_data.sink_signal = sink_caps.signal;
2092
2093         sink = dc_sink_create(&sink_init_data);
2094         if (!sink) {
2095                 DC_ERROR("Failed to create sink!\n");
2096                 return;
2097         }
2098
2099         /* dc_sink_create returns a new reference */
2100         link->local_sink = sink;
2101
2102         edid_status = dm_helpers_read_local_edid(
2103                         link->ctx,
2104                         link,
2105                         sink);
2106
2107         if (edid_status != EDID_OK)
2108                 DC_ERROR("Failed to read EDID\n");
2110 }
2111
2112 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2113                                      struct amdgpu_display_manager *dm)
2114 {
2115         struct {
2116                 struct dc_surface_update surface_updates[MAX_SURFACES];
2117                 struct dc_plane_info plane_infos[MAX_SURFACES];
2118                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2119                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2120                 struct dc_stream_update stream_update;
2121         } *bundle;
2122         int k, m;
2123
2124         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2125
2126         if (!bundle) {
2127                 dm_error("Failed to allocate update bundle\n");
2128                 goto cleanup;
2129         }
2130
2131         for (k = 0; k < dc_state->stream_count; k++) {
2132                 bundle->stream_update.stream = dc_state->streams[k];
2133
2134                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2135                         bundle->surface_updates[m].surface =
2136                                 dc_state->stream_status[k].plane_states[m];
2137                         bundle->surface_updates[m].surface->force_full_update =
2138                                 true;
2139                 }
2140                 dc_commit_updates_for_stream(
2141                         dm->dc, bundle->surface_updates,
2142                         dc_state->stream_status[k].plane_count,
2143                         dc_state->streams[k], &bundle->stream_update, dc_state);
2144         }
2145
2146 cleanup:
2147         kfree(bundle);
2148
2149         return;
2150 }
2151
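/* Force the stream on the given link into DPMS off via a stream update */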
2152 static void dm_set_dpms_off(struct dc_link *link)
2153 {
2154         struct dc_stream_state *stream_state;
2155         struct amdgpu_dm_connector *aconnector = link->priv;
2156         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2157         struct dc_stream_update stream_update;
2158         bool dpms_off = true;
2159
2160         memset(&stream_update, 0, sizeof(stream_update));
2161         stream_update.dpms_off = &dpms_off;
2162
2163         mutex_lock(&adev->dm.dc_lock);
2164         stream_state = dc_stream_find_from_link(link);
2165
2166         if (stream_state == NULL) {
2167                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2168                 mutex_unlock(&adev->dm.dc_lock);
2169                 return;
2170         }
2171
2172         stream_update.stream = stream_state;
2173         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2174                                      stream_state, &stream_update,
2175                                      stream_state->ctx->dc->current_state);
2176         mutex_unlock(&adev->dm.dc_lock);
2177 }
2178
2179 static int dm_resume(void *handle)
2180 {
2181         struct amdgpu_device *adev = handle;
2182         struct drm_device *ddev = adev_to_drm(adev);
2183         struct amdgpu_display_manager *dm = &adev->dm;
2184         struct amdgpu_dm_connector *aconnector;
2185         struct drm_connector *connector;
2186         struct drm_connector_list_iter iter;
2187         struct drm_crtc *crtc;
2188         struct drm_crtc_state *new_crtc_state;
2189         struct dm_crtc_state *dm_new_crtc_state;
2190         struct drm_plane *plane;
2191         struct drm_plane_state *new_plane_state;
2192         struct dm_plane_state *dm_new_plane_state;
2193         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2194         enum dc_connection_type new_connection_type = dc_connection_none;
2195         struct dc_state *dc_state;
2196         int i, r, j;
2197
2198         if (amdgpu_in_reset(adev)) {
2199                 dc_state = dm->cached_dc_state;
2200
2201                 r = dm_dmub_hw_init(adev);
2202                 if (r)
2203                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2204
2205                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2206                 dc_resume(dm->dc);
2207
2208                 amdgpu_dm_irq_resume_early(adev);
2209
2210                 for (i = 0; i < dc_state->stream_count; i++) {
2211                         dc_state->streams[i]->mode_changed = true;
2212                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2213                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2214                                         = 0xffffffff;
2215                         }
2216                 }
2217
2218                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2219
2220                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2221
2222                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2223
2224                 dc_release_state(dm->cached_dc_state);
2225                 dm->cached_dc_state = NULL;
2226
2227                 amdgpu_dm_irq_resume_late(adev);
2228
2229                 mutex_unlock(&dm->dc_lock);
2230
2231                 return 0;
2232         }
2233         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2234         dc_release_state(dm_state->context);
2235         dm_state->context = dc_create_state(dm->dc);
2236         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2237         dc_resource_state_construct(dm->dc, dm_state->context);
2238
2239         /* Before powering on DC we need to re-initialize DMUB. */
2240         r = dm_dmub_hw_init(adev);
2241         if (r)
2242                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2243
2244         /* power on hardware */
2245         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2246
2247         /* program HPD filter */
2248         dc_resume(dm->dc);
2249
2250         /*
2251          * early enable HPD Rx IRQ, should be done before set mode as short
2252          * pulse interrupts are used for MST
2253          */
2254         amdgpu_dm_irq_resume_early(adev);
2255
2256         /* On resume we need to rewrite the MSTM control bits to enable MST */
2257         s3_handle_mst(ddev, false);
2258
2259         /* Do detection */
2260         drm_connector_list_iter_begin(ddev, &iter);
2261         drm_for_each_connector_iter(connector, &iter) {
2262                 aconnector = to_amdgpu_dm_connector(connector);
2263
2264                 /*
2265                  * this is the case when traversing through already created
2266                  * MST connectors, should be skipped
2267                  */
2268                 if (aconnector->mst_port)
2269                         continue;
2270
2271                 mutex_lock(&aconnector->hpd_lock);
2272                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2273                         DRM_ERROR("KMS: Failed to detect connector\n");
2274
2275                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2276                         emulated_link_detect(aconnector->dc_link);
2277                 else
2278                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2279
2280                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2281                         aconnector->fake_enable = false;
2282
2283                 if (aconnector->dc_sink)
2284                         dc_sink_release(aconnector->dc_sink);
2285                 aconnector->dc_sink = NULL;
2286                 amdgpu_dm_update_connector_after_detect(aconnector);
2287                 mutex_unlock(&aconnector->hpd_lock);
2288         }
2289         drm_connector_list_iter_end(&iter);
2290
2291         /* Force mode set in atomic commit */
2292         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2293                 new_crtc_state->active_changed = true;
2294
2295         /*
2296          * atomic_check is expected to create the dc states. We need to release
2297          * them here, since they were duplicated as part of the suspend
2298          * procedure.
2299          */
2300         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2301                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2302                 if (dm_new_crtc_state->stream) {
2303                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2304                         dc_stream_release(dm_new_crtc_state->stream);
2305                         dm_new_crtc_state->stream = NULL;
2306                 }
2307         }
2308
2309         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2310                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2311                 if (dm_new_plane_state->dc_state) {
2312                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2313                         dc_plane_state_release(dm_new_plane_state->dc_state);
2314                         dm_new_plane_state->dc_state = NULL;
2315                 }
2316         }
2317
2318         drm_atomic_helper_resume(ddev, dm->cached_state);
2319
2320         dm->cached_state = NULL;
2321
2322         amdgpu_dm_irq_resume_late(adev);
2323
2324         amdgpu_dm_smu_write_watermarks_table(adev);
2325
2326         return 0;
2327 }
2328
2329 /**
2330  * DOC: DM Lifecycle
2331  *
2332  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2333  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2334  * the base driver's device list to be initialized and torn down accordingly.
2335  *
2336  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2337  */
2338
2339 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2340         .name = "dm",
2341         .early_init = dm_early_init,
2342         .late_init = dm_late_init,
2343         .sw_init = dm_sw_init,
2344         .sw_fini = dm_sw_fini,
2345         .hw_init = dm_hw_init,
2346         .hw_fini = dm_hw_fini,
2347         .suspend = dm_suspend,
2348         .resume = dm_resume,
2349         .is_idle = dm_is_idle,
2350         .wait_for_idle = dm_wait_for_idle,
2351         .check_soft_reset = dm_check_soft_reset,
2352         .soft_reset = dm_soft_reset,
2353         .set_clockgating_state = dm_set_clockgating_state,
2354         .set_powergating_state = dm_set_powergating_state,
2355 };
2356
2357 const struct amdgpu_ip_block_version dm_ip_block =
2358 {
2359         .type = AMD_IP_BLOCK_TYPE_DCE,
2360         .major = 1,
2361         .minor = 0,
2362         .rev = 0,
2363         .funcs = &amdgpu_dm_funcs,
2364 };
2365
2366
2367 /**
2368  * DOC: atomic
2369  *
2370  * *WIP*
2371  */
2372
2373 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2374         .fb_create = amdgpu_display_user_framebuffer_create,
2375         .get_format_info = amd_get_format_info,
2376         .output_poll_changed = drm_fb_helper_output_poll_changed,
2377         .atomic_check = amdgpu_dm_atomic_check,
2378         .atomic_commit = drm_atomic_helper_commit,
2379 };
2380
2381 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2382         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2383 };
2384
2385 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2386 {
2387         u32 max_cll, min_cll, max, min, q, r;
2388         struct amdgpu_dm_backlight_caps *caps;
2389         struct amdgpu_display_manager *dm;
2390         struct drm_connector *conn_base;
2391         struct amdgpu_device *adev;
2392         struct dc_link *link = NULL;
2393         static const u8 pre_computed_values[] = {
2394                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2395                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2396
2397         if (!aconnector || !aconnector->dc_link)
2398                 return;
2399
2400         link = aconnector->dc_link;
2401         if (link->connector_signal != SIGNAL_TYPE_EDP)
2402                 return;
2403
2404         conn_base = &aconnector->base;
2405         adev = drm_to_adev(conn_base->dev);
2406         dm = &adev->dm;
2407         caps = &dm->backlight_caps;
2408         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2409         caps->aux_support = false;
2410         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2411         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2412
2413         if (caps->ext_caps->bits.oled == 1 ||
2414             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2415             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2416                 caps->aux_support = true;
2417
2418         if (amdgpu_backlight == 0)
2419                 caps->aux_support = false;
2420         else if (amdgpu_backlight == 1)
2421                 caps->aux_support = true;
2422
2423         /* From the specification (CTA-861-G), for calculating the maximum
2424          * luminance we need to use:
2425          *      Luminance = 50*2**(CV/32)
2426          * Where CV is a one-byte value.
2427          * Calculating this expression directly would require floating-point
2428          * precision; to avoid that complexity, we take advantage of the fact
2429          * that CV is divided by a constant. By Euclid's division algorithm, CV
2430          * can be written as CV = 32*q + r. Replacing CV in the Luminance
2431          * expression gives 50*(2**q)*(2**(r/32)), so we only need to
2432          * pre-compute the values of 2**(r/32). The values were generated with
2433          * the following Ruby line:
2434          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2435          * The results of the above expressions can be verified at
2436          * pre_computed_values.
2437          */
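        /*
         * Illustrative example: max_cll = 70 gives q = 2 and r = 6, so
         * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, matching
         * 50*2**(70/32) ~= 227.8.
         */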
2438         q = max_cll >> 5;
2439         r = max_cll % 32;
2440         max = (1 << q) * pre_computed_values[r];
2441
2442         // min luminance: maxLum * (CV/255)^2 / 100
2443         q = DIV_ROUND_CLOSEST(min_cll, 255);
2444         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2445
2446         caps->aux_max_input_signal = max;
2447         caps->aux_min_input_signal = min;
2448 }
2449
2450 void amdgpu_dm_update_connector_after_detect(
2451                 struct amdgpu_dm_connector *aconnector)
2452 {
2453         struct drm_connector *connector = &aconnector->base;
2454         struct drm_device *dev = connector->dev;
2455         struct dc_sink *sink;
2456
2457         /* MST handled by drm_mst framework */
2458         if (aconnector->mst_mgr.mst_state)
2459                 return;
2460
2461         sink = aconnector->dc_link->local_sink;
2462         if (sink)
2463                 dc_sink_retain(sink);
2464
2465         /*
2466          * EDID-managed connectors get their first update only in the mode_valid
2467          * hook; the sink is then set to fake or physical depending on link status.
2468          * Skip if already done during boot.
2469          */
2470         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2471                         && aconnector->dc_em_sink) {
2472
2473                 /*
2474                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2475                  * fake a stream, because on resume connector->sink is set to NULL
2476                  */
2477                 mutex_lock(&dev->mode_config.mutex);
2478
2479                 if (sink) {
2480                         if (aconnector->dc_sink) {
2481                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2482                                 /*
2483                                  * retain and release below are used to
2484                                  * bump up the refcount for the sink because the link no longer
2485                                  * points to it after disconnect; otherwise, on the next crtc to
2486                                  * connector reshuffle by UMD we would hit an unwanted dc_sink release
2487                                  */
2488                                 dc_sink_release(aconnector->dc_sink);
2489                         }
2490                         aconnector->dc_sink = sink;
2491                         dc_sink_retain(aconnector->dc_sink);
2492                         amdgpu_dm_update_freesync_caps(connector,
2493                                         aconnector->edid);
2494                 } else {
2495                         amdgpu_dm_update_freesync_caps(connector, NULL);
2496                         if (!aconnector->dc_sink) {
2497                                 aconnector->dc_sink = aconnector->dc_em_sink;
2498                                 dc_sink_retain(aconnector->dc_sink);
2499                         }
2500                 }
2501
2502                 mutex_unlock(&dev->mode_config.mutex);
2503
2504                 if (sink)
2505                         dc_sink_release(sink);
2506                 return;
2507         }
2508
2509         /*
2510          * TODO: temporary guard to look for proper fix
2511          * if this sink is MST sink, we should not do anything
2512          */
2513         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2514                 dc_sink_release(sink);
2515                 return;
2516         }
2517
2518         if (aconnector->dc_sink == sink) {
2519                 /*
2520                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2521                  * Do nothing!!
2522                  */
2523                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2524                                 aconnector->connector_id);
2525                 if (sink)
2526                         dc_sink_release(sink);
2527                 return;
2528         }
2529
2530         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2531                 aconnector->connector_id, aconnector->dc_sink, sink);
2532
2533         mutex_lock(&dev->mode_config.mutex);
2534
2535         /*
2536          * 1. Update status of the drm connector
2537          * 2. Send an event and let userspace tell us what to do
2538          */
2539         if (sink) {
2540                 /*
2541                  * TODO: check if we still need the S3 mode update workaround.
2542                  * If yes, put it here.
2543                  */
2544                 if (aconnector->dc_sink) {
2545                         amdgpu_dm_update_freesync_caps(connector, NULL);
2546                         dc_sink_release(aconnector->dc_sink);
2547                 }
2548
2549                 aconnector->dc_sink = sink;
2550                 dc_sink_retain(aconnector->dc_sink);
2551                 if (sink->dc_edid.length == 0) {
2552                         aconnector->edid = NULL;
2553                         if (aconnector->dc_link->aux_mode) {
2554                                 drm_dp_cec_unset_edid(
2555                                         &aconnector->dm_dp_aux.aux);
2556                         }
2557                 } else {
2558                         aconnector->edid =
2559                                 (struct edid *)sink->dc_edid.raw_edid;
2560
2561                         drm_connector_update_edid_property(connector,
2562                                                            aconnector->edid);
2563                         if (aconnector->dc_link->aux_mode)
2564                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2565                                                     aconnector->edid);
2566                 }
2567
2568                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2569                 update_connector_ext_caps(aconnector);
2570         } else {
2571                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2572                 amdgpu_dm_update_freesync_caps(connector, NULL);
2573                 drm_connector_update_edid_property(connector, NULL);
2574                 aconnector->num_modes = 0;
2575                 dc_sink_release(aconnector->dc_sink);
2576                 aconnector->dc_sink = NULL;
2577                 aconnector->edid = NULL;
2578 #ifdef CONFIG_DRM_AMD_DC_HDCP
2579                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2580                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2581                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2582 #endif
2583         }
2584
2585         mutex_unlock(&dev->mode_config.mutex);
2586
2587         update_subconnector_property(aconnector);
2588
2589         if (sink)
2590                 dc_sink_release(sink);
2591 }
2592
2593 static void handle_hpd_irq(void *param)
2594 {
2595         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2596         struct drm_connector *connector = &aconnector->base;
2597         struct drm_device *dev = connector->dev;
2598         enum dc_connection_type new_connection_type = dc_connection_none;
2599         struct amdgpu_device *adev = drm_to_adev(dev);
2600 #ifdef CONFIG_DRM_AMD_DC_HDCP
2601         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2602 #endif
2603
2604         if (adev->dm.disable_hpd_irq)
2605                 return;
2606
2607         /*
2608          * In case of failure or MST, there is no need to update the connector
2609          * status or notify the OS, since (for the MST case) MST does this in its own context.
2610          */
2611         mutex_lock(&aconnector->hpd_lock);
2612
2613 #ifdef CONFIG_DRM_AMD_DC_HDCP
2614         if (adev->dm.hdcp_workqueue) {
2615                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2616                 dm_con_state->update_hdcp = true;
2617         }
2618 #endif
2619         if (aconnector->fake_enable)
2620                 aconnector->fake_enable = false;
2621
2622         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2623                 DRM_ERROR("KMS: Failed to detect connector\n");
2624
2625         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2626                 emulated_link_detect(aconnector->dc_link);
2627
2629                 drm_modeset_lock_all(dev);
2630                 dm_restore_drm_connector_state(dev, connector);
2631                 drm_modeset_unlock_all(dev);
2632
2633                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2634                         drm_kms_helper_hotplug_event(dev);
2635
2636         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2637                 if (new_connection_type == dc_connection_none &&
2638                     aconnector->dc_link->type == dc_connection_none)
2639                         dm_set_dpms_off(aconnector->dc_link);
2640
2641                 amdgpu_dm_update_connector_after_detect(aconnector);
2642
2643                 drm_modeset_lock_all(dev);
2644                 dm_restore_drm_connector_state(dev, connector);
2645                 drm_modeset_unlock_all(dev);
2646
2647                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2648                         drm_kms_helper_hotplug_event(dev);
2649         }
2650         mutex_unlock(&aconnector->hpd_lock);
2652 }
2653
2654 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2655 {
2656         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2657         uint8_t dret;
2658         bool new_irq_handled = false;
2659         int dpcd_addr;
2660         int dpcd_bytes_to_read;
2661
2662         const int max_process_count = 30;
2663         int process_count = 0;
2664
2665         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2666
2667         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2668                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2669                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2670                 dpcd_addr = DP_SINK_COUNT;
2671         } else {
2672                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2673                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2674                 dpcd_addr = DP_SINK_COUNT_ESI;
2675         }
2676
2677         dret = drm_dp_dpcd_read(
2678                 &aconnector->dm_dp_aux.aux,
2679                 dpcd_addr,
2680                 esi,
2681                 dpcd_bytes_to_read);
2682
2683         while (dret == dpcd_bytes_to_read &&
2684                 process_count < max_process_count) {
2685                 uint8_t retry;
2686                 dret = 0;
2687
2688                 process_count++;
2689
2690                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2691                 /* handle HPD short pulse irq */
2692                 if (aconnector->mst_mgr.mst_state)
2693                         drm_dp_mst_hpd_irq(
2694                                 &aconnector->mst_mgr,
2695                                 esi,
2696                                 &new_irq_handled);
2697
2698                 if (new_irq_handled) {
2699                         /* ACK at DPCD to notify downstream */
2700                         const int ack_dpcd_bytes_to_write =
2701                                 dpcd_bytes_to_read - 1;
2702
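                        /*
                         * Skip the first (read-only) sink-count byte and write
                         * the serviced IRQ bits back, which acks and clears the
                         * pending IRQ at the sink.
                         */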
2703                         for (retry = 0; retry < 3; retry++) {
2704                                 uint8_t wret;
2705
2706                                 wret = drm_dp_dpcd_write(
2707                                         &aconnector->dm_dp_aux.aux,
2708                                         dpcd_addr + 1,
2709                                         &esi[1],
2710                                         ack_dpcd_bytes_to_write);
2711                                 if (wret == ack_dpcd_bytes_to_write)
2712                                         break;
2713                         }
2714
2715                         /* check if there is new irq to be handled */
2716                         dret = drm_dp_dpcd_read(
2717                                 &aconnector->dm_dp_aux.aux,
2718                                 dpcd_addr,
2719                                 esi,
2720                                 dpcd_bytes_to_read);
2721
2722                         new_irq_handled = false;
2723                 } else {
2724                         break;
2725                 }
2726         }
2727
2728         if (process_count == max_process_count)
2729                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2730 }
2731
2732 static void handle_hpd_rx_irq(void *param)
2733 {
2734         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2735         struct drm_connector *connector = &aconnector->base;
2736         struct drm_device *dev = connector->dev;
2737         struct dc_link *dc_link = aconnector->dc_link;
2738         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2739         bool result = false;
2740         enum dc_connection_type new_connection_type = dc_connection_none;
2741         struct amdgpu_device *adev = drm_to_adev(dev);
2742         union hpd_irq_data hpd_irq_data;
2743         bool lock_flag = false;
2744
2745         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2746
2747         if (adev->dm.disable_hpd_irq)
2748                 return;
2749
2751         /*
2752          * TODO: Temporarily add a mutex to protect the hpd interrupt from a
2753          * gpio conflict; after the i2c helper is implemented, this mutex
2754          * should be retired.
2755          */
2756         mutex_lock(&aconnector->hpd_lock);
2757
2758         read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2759
2760         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2761                 (dc_link->type == dc_connection_mst_branch)) {
2762                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2763                         result = true;
2764                         dm_handle_hpd_rx_irq(aconnector);
2765                         goto out;
2766                 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2767                         result = false;
2768                         dm_handle_hpd_rx_irq(aconnector);
2769                         goto out;
2770                 }
2771         }
2772
2773         /*
2774          * TODO: We need the lock to avoid touching DC state while it's being
2775          * modified during automated compliance testing, or when link loss
2776          * happens. While this should be split into subhandlers and proper
2777          * interfaces to avoid having to conditionally lock like this in the
2778          * outer layer, we need this workaround temporarily to allow MST
2779          * lightup in some scenarios to avoid timeout.
2780          */
2781         if (!amdgpu_in_reset(adev) &&
2782             (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
2783              hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
2784                 mutex_lock(&adev->dm.dc_lock);
2785                 lock_flag = 1;
2786         }
2787
2788 #ifdef CONFIG_DRM_AMD_DC_HDCP
2789         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2790 #else
2791         result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2792 #endif
2793         if (!amdgpu_in_reset(adev) && lock_flag)
2794                 mutex_unlock(&adev->dm.dc_lock);
2795
2796 out:
2797         if (result && !is_mst_root_connector) {
2798                 /* Downstream Port status changed. */
2799                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2800                         DRM_ERROR("KMS: Failed to detect connector\n");
2801
2802                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2803                         emulated_link_detect(dc_link);
2804
2805                         if (aconnector->fake_enable)
2806                                 aconnector->fake_enable = false;
2807
2808                         amdgpu_dm_update_connector_after_detect(aconnector);
2809
2810
2811                         drm_modeset_lock_all(dev);
2812                         dm_restore_drm_connector_state(dev, connector);
2813                         drm_modeset_unlock_all(dev);
2814
2815                         drm_kms_helper_hotplug_event(dev);
2816                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2817
2818                         if (aconnector->fake_enable)
2819                                 aconnector->fake_enable = false;
2820
2821                         amdgpu_dm_update_connector_after_detect(aconnector);
2822
2823
2824                         drm_modeset_lock_all(dev);
2825                         dm_restore_drm_connector_state(dev, connector);
2826                         drm_modeset_unlock_all(dev);
2827
2828                         drm_kms_helper_hotplug_event(dev);
2829                 }
2830         }
2831 #ifdef CONFIG_DRM_AMD_DC_HDCP
2832         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2833                 if (adev->dm.hdcp_workqueue)
2834                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2835         }
2836 #endif
2837
2838         if (dc_link->type != dc_connection_mst_branch)
2839                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2840
2841         mutex_unlock(&aconnector->hpd_lock);
2842 }
2843
2844 static void register_hpd_handlers(struct amdgpu_device *adev)
2845 {
2846         struct drm_device *dev = adev_to_drm(adev);
2847         struct drm_connector *connector;
2848         struct amdgpu_dm_connector *aconnector;
2849         const struct dc_link *dc_link;
2850         struct dc_interrupt_params int_params = {0};
2851
2852         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2853         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2854
2855         list_for_each_entry(connector,
2856                         &dev->mode_config.connector_list, head) {
2857
2858                 aconnector = to_amdgpu_dm_connector(connector);
2859                 dc_link = aconnector->dc_link;
2860
2861                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2862                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2863                         int_params.irq_source = dc_link->irq_source_hpd;
2864
2865                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2866                                         handle_hpd_irq,
2867                                         (void *) aconnector);
2868                 }
2869
2870                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2871
2872                         /* Also register for DP short pulse (hpd_rx). */
2873                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2874                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2875
2876                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2877                                         handle_hpd_rx_irq,
2878                                         (void *) aconnector);
2879                 }
2880         }
2881 }
2882
2883 #if defined(CONFIG_DRM_AMD_DC_SI)
2884 /* Register IRQ sources and initialize IRQ callbacks */
2885 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2886 {
2887         struct dc *dc = adev->dm.dc;
2888         struct common_irq_params *c_irq_params;
2889         struct dc_interrupt_params int_params = {0};
2890         int r;
2891         int i;
2892         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2893
2894         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2895         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2896
2897         /*
2898          * Actions of amdgpu_irq_add_id():
2899          * 1. Register a set() function with base driver.
2900          *    Base driver will call set() function to enable/disable an
2901          *    interrupt in DC hardware.
2902          * 2. Register amdgpu_dm_irq_handler().
2903          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2904          *    coming from DC hardware.
2905          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2906          *    for acknowledging and handling. */
2907
2908         /* Use VBLANK interrupt */
2909         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2910                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2911                 if (r) {
2912                         DRM_ERROR("Failed to add crtc irq id!\n");
2913                         return r;
2914                 }
2915
2916                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2917                 int_params.irq_source =
2918                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2919
2920                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2921
2922                 c_irq_params->adev = adev;
2923                 c_irq_params->irq_src = int_params.irq_source;
2924
2925                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2926                                 dm_crtc_high_irq, c_irq_params);
2927         }
2928
2929         /* Use GRPH_PFLIP interrupt */
2930         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2931                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2932                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2933                 if (r) {
2934                         DRM_ERROR("Failed to add page flip irq id!\n");
2935                         return r;
2936                 }
2937
2938                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2939                 int_params.irq_source =
2940                         dc_interrupt_to_irq_source(dc, i, 0);
2941
2942                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2943
2944                 c_irq_params->adev = adev;
2945                 c_irq_params->irq_src = int_params.irq_source;
2946
2947                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2948                                 dm_pflip_high_irq, c_irq_params);
2949
2950         }
2951
2952         /* HPD */
2953         r = amdgpu_irq_add_id(adev, client_id,
2954                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2955         if (r) {
2956                 DRM_ERROR("Failed to add hpd irq id!\n");
2957                 return r;
2958         }
2959
2960         register_hpd_handlers(adev);
2961
2962         return 0;
2963 }
2964 #endif
2965
2966 /* Register IRQ sources and initialize IRQ callbacks */
2967 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2968 {
2969         struct dc *dc = adev->dm.dc;
2970         struct common_irq_params *c_irq_params;
2971         struct dc_interrupt_params int_params = {0};
2972         int r;
2973         int i;
2974         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2975
2976         if (adev->asic_type >= CHIP_VEGA10)
2977                 client_id = SOC15_IH_CLIENTID_DCE;
2978
2979         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2980         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2981
2982         /*
2983          * Actions of amdgpu_irq_add_id():
2984          * 1. Register a set() function with base driver.
2985          *    Base driver will call set() function to enable/disable an
2986          *    interrupt in DC hardware.
2987          * 2. Register amdgpu_dm_irq_handler().
2988          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2989          *    coming from DC hardware.
2990          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2991          *    for acknowledging and handling. */
2992
2993         /* Use VBLANK interrupt */
2994         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2995                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2996                 if (r) {
2997                         DRM_ERROR("Failed to add crtc irq id!\n");
2998                         return r;
2999                 }
3000
3001                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3002                 int_params.irq_source =
3003                         dc_interrupt_to_irq_source(dc, i, 0);
3004
3005                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3006
3007                 c_irq_params->adev = adev;
3008                 c_irq_params->irq_src = int_params.irq_source;
3009
3010                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3011                                 dm_crtc_high_irq, c_irq_params);
3012         }
3013
3014         /* Use VUPDATE interrupt */
3015         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3016                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3017                 if (r) {
3018                         DRM_ERROR("Failed to add vupdate irq id!\n");
3019                         return r;
3020                 }
3021
3022                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3023                 int_params.irq_source =
3024                         dc_interrupt_to_irq_source(dc, i, 0);
3025
3026                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3027
3028                 c_irq_params->adev = adev;
3029                 c_irq_params->irq_src = int_params.irq_source;
3030
3031                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3032                                 dm_vupdate_high_irq, c_irq_params);
3033         }
3034
3035         /* Use GRPH_PFLIP interrupt */
3036         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3037                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3038                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3039                 if (r) {
3040                         DRM_ERROR("Failed to add page flip irq id!\n");
3041                         return r;
3042                 }
3043
3044                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3045                 int_params.irq_source =
3046                         dc_interrupt_to_irq_source(dc, i, 0);
3047
3048                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3049
3050                 c_irq_params->adev = adev;
3051                 c_irq_params->irq_src = int_params.irq_source;
3052
3053                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3054                                 dm_pflip_high_irq, c_irq_params);
3055
3056         }
3057
3058         /* HPD */
3059         r = amdgpu_irq_add_id(adev, client_id,
3060                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3061         if (r) {
3062                 DRM_ERROR("Failed to add hpd irq id!\n");
3063                 return r;
3064         }
3065
3066         register_hpd_handlers(adev);
3067
3068         return 0;
3069 }
3070
3071 #if defined(CONFIG_DRM_AMD_DC_DCN)
3072 /* Register IRQ sources and initialize IRQ callbacks */
3073 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3074 {
3075         struct dc *dc = adev->dm.dc;
3076         struct common_irq_params *c_irq_params;
3077         struct dc_interrupt_params int_params = {0};
3078         int r;
3079         int i;
3080 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3081         static const unsigned int vrtl_int_srcid[] = {
3082                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3083                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3084                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3085                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3086                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3087                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3088         };
3089 #endif
3090
3091         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3092         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3093
3094         /*
3095          * Actions of amdgpu_irq_add_id():
3096          * 1. Register a set() function with base driver.
3097          *    Base driver will call set() function to enable/disable an
3098          *    interrupt in DC hardware.
3099          * 2. Register amdgpu_dm_irq_handler().
3100          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3101          *    coming from DC hardware.
3102          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3103          *    for acknowledging and handling.
3104          */
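
        /*
         * Condensed illustration of the two-step pattern used below (a
         * restatement of the steps above, with schematic argument names,
         * not an additional API):
         *
         *   amdgpu_irq_add_id(adev, client_id, srcid, &irq_src);
         *   amdgpu_dm_irq_register_interrupt(adev, &int_params,
         *                                    handler, handler_arg);
         *
         * so a DC interrupt travels base IH -> amdgpu_dm_irq_handler()
         * -> the registered handler (e.g. dm_crtc_high_irq()).
         */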
3105
3106         /* Use VSTARTUP interrupt */
3107         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3108                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3109                         i++) {
3110                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3111
3112                 if (r) {
3113                         DRM_ERROR("Failed to add crtc irq id!\n");
3114                         return r;
3115                 }
3116
3117                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3118                 int_params.irq_source =
3119                         dc_interrupt_to_irq_source(dc, i, 0);
3120
3121                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3122
3123                 c_irq_params->adev = adev;
3124                 c_irq_params->irq_src = int_params.irq_source;
3125
3126                 amdgpu_dm_irq_register_interrupt(
3127                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3128         }
3129
3130         /* Use otg vertical line interrupt */
3131 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3132         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3133                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3134                                 vrtl_int_srcid[i], &adev->vline0_irq);
3135
3136                 if (r) {
3137                         DRM_ERROR("Failed to add vline0 irq id!\n");
3138                         return r;
3139                 }
3140
3141                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3142                 int_params.irq_source =
3143                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3144
3145                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3146                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3147                         break;
3148                 }
3149
3150                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3151                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3152
3153                 c_irq_params->adev = adev;
3154                 c_irq_params->irq_src = int_params.irq_source;
3155
3156                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3157                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3158         }
3159 #endif
3160
3161         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3162          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3163          * to trigger at end of each vblank, regardless of state of the lock,
3164          * matching DCE behaviour.
3165          */
3166         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3167              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3168              i++) {
3169                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3170
3171                 if (r) {
3172                         DRM_ERROR("Failed to add vupdate irq id!\n");
3173                         return r;
3174                 }
3175
3176                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3177                 int_params.irq_source =
3178                         dc_interrupt_to_irq_source(dc, i, 0);
3179
3180                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3181
3182                 c_irq_params->adev = adev;
3183                 c_irq_params->irq_src = int_params.irq_source;
3184
3185                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3186                                 dm_vupdate_high_irq, c_irq_params);
3187         }
3188
3189         /* Use GRPH_PFLIP interrupt */
3190         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3191                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3192                         i++) {
3193                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3194                 if (r) {
3195                         DRM_ERROR("Failed to add page flip irq id!\n");
3196                         return r;
3197                 }
3198
3199                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3200                 int_params.irq_source =
3201                         dc_interrupt_to_irq_source(dc, i, 0);
3202
3203                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3204
3205                 c_irq_params->adev = adev;
3206                 c_irq_params->irq_src = int_params.irq_source;
3207
3208                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3209                                 dm_pflip_high_irq, c_irq_params);
3210
3211         }
3212
3213         /* HPD */
3214         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3215                         &adev->hpd_irq);
3216         if (r) {
3217                 DRM_ERROR("Failed to add hpd irq id!\n");
3218                 return r;
3219         }
3220
3221         register_hpd_handlers(adev);
3222
3223         return 0;
3224 }
3225 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3226 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3227 {
3228         struct dc *dc = adev->dm.dc;
3229         struct common_irq_params *c_irq_params;
3230         struct dc_interrupt_params int_params = {0};
3231         int r, i;
3232
3233         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3234         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3235
3236         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3237                         &adev->dmub_outbox_irq);
3238         if (r) {
3239                 DRM_ERROR("Failed to add outbox irq id!\n");
3240                 return r;
3241         }
3242
3243         if (dc->ctx->dmub_srv) {
3244                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3245                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3246                 int_params.irq_source =
3247                 dc_interrupt_to_irq_source(dc, i, 0);
3248
3249                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3250
3251                 c_irq_params->adev = adev;
3252                 c_irq_params->irq_src = int_params.irq_source;
3253
3254                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3255                                 dm_dmub_outbox1_low_irq, c_irq_params);
3256         }
3257
3258         return 0;
3259 }
3260 #endif
3261
3262 /*
3263  * Acquires the lock for the atomic state object and returns
3264  * the new atomic state.
3265  *
3266  * This should only be called during atomic check.
3267  */
3268 static int dm_atomic_get_state(struct drm_atomic_state *state,
3269                                struct dm_atomic_state **dm_state)
3270 {
3271         struct drm_device *dev = state->dev;
3272         struct amdgpu_device *adev = drm_to_adev(dev);
3273         struct amdgpu_display_manager *dm = &adev->dm;
3274         struct drm_private_state *priv_state;
3275
3276         if (*dm_state)
3277                 return 0;
3278
3279         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3280         if (IS_ERR(priv_state))
3281                 return PTR_ERR(priv_state);
3282
3283         *dm_state = to_dm_atomic_state(priv_state);
3284
3285         return 0;
3286 }
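
/*
 * Illustrative call pattern (a sketch, not a call site copied from this
 * file): callers during atomic check keep a NULL-initialized pointer and
 * let dm_atomic_get_state() populate it on first use:
 *
 *     struct dm_atomic_state *dm_state = NULL;
 *     int ret = dm_atomic_get_state(state, &dm_state);
 *     if (ret)
 *             return ret;
 *
 * Repeated calls with the same pointer return 0 immediately, so the
 * private object state is only looked up once per atomic state.
 */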
3287
3288 static struct dm_atomic_state *
3289 dm_atomic_get_new_state(struct drm_atomic_state *state)
3290 {
3291         struct drm_device *dev = state->dev;
3292         struct amdgpu_device *adev = drm_to_adev(dev);
3293         struct amdgpu_display_manager *dm = &adev->dm;
3294         struct drm_private_obj *obj;
3295         struct drm_private_state *new_obj_state;
3296         int i;
3297
3298         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3299                 if (obj->funcs == dm->atomic_obj.funcs)
3300                         return to_dm_atomic_state(new_obj_state);
3301         }
3302
3303         return NULL;
3304 }
3305
3306 static struct drm_private_state *
3307 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3308 {
3309         struct dm_atomic_state *old_state, *new_state;
3310
3311         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3312         if (!new_state)
3313                 return NULL;
3314
3315         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3316
3317         old_state = to_dm_atomic_state(obj->state);
3318
3319         if (old_state && old_state->context)
3320                 new_state->context = dc_copy_state(old_state->context);
3321
3322         if (!new_state->context) {
3323                 kfree(new_state);
3324                 return NULL;
3325         }
3326
3327         return &new_state->base;
3328 }
3329
3330 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3331                                     struct drm_private_state *state)
3332 {
3333         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3334
3335         if (dm_state && dm_state->context)
3336                 dc_release_state(dm_state->context);
3337
3338         kfree(dm_state);
3339 }
3340
3341 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3342         .atomic_duplicate_state = dm_atomic_duplicate_state,
3343         .atomic_destroy_state = dm_atomic_destroy_state,
3344 };
3345
3346 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3347 {
3348         struct dm_atomic_state *state;
3349         int r;
3350
3351         adev->mode_info.mode_config_initialized = true;
3352
3353         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3354         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3355
3356         adev_to_drm(adev)->mode_config.max_width = 16384;
3357         adev_to_drm(adev)->mode_config.max_height = 16384;
3358
3359         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3360         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3361         /* indicates support for immediate flip */
3362         adev_to_drm(adev)->mode_config.async_page_flip = true;
3363
3364         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3365
3366         state = kzalloc(sizeof(*state), GFP_KERNEL);
3367         if (!state)
3368                 return -ENOMEM;
3369
3370         state->context = dc_create_state(adev->dm.dc);
3371         if (!state->context) {
3372                 kfree(state);
3373                 return -ENOMEM;
3374         }
3375
3376         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3377
3378         drm_atomic_private_obj_init(adev_to_drm(adev),
3379                                     &adev->dm.atomic_obj,
3380                                     &state->base,
3381                                     &dm_atomic_state_funcs);
3382
3383         r = amdgpu_display_modeset_create_props(adev);
3384         if (r) {
3385                 dc_release_state(state->context);
3386                 kfree(state);
3387                 return r;
3388         }
3389
3390         r = amdgpu_dm_audio_init(adev);
3391         if (r) {
3392                 dc_release_state(state->context);
3393                 kfree(state);
3394                 return r;
3395         }
3396
3397         return 0;
3398 }
3399
3400 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3401 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3402 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3403
3404 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3405         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3406
3407 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3408 {
3409 #if defined(CONFIG_ACPI)
3410         struct amdgpu_dm_backlight_caps caps;
3411
3412         memset(&caps, 0, sizeof(caps));
3413
3414         if (dm->backlight_caps.caps_valid)
3415                 return;
3416
3417         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3418         if (caps.caps_valid) {
3419                 dm->backlight_caps.caps_valid = true;
3420                 if (caps.aux_support)
3421                         return;
3422                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3423                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3424         } else {
3425                 dm->backlight_caps.min_input_signal =
3426                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3427                 dm->backlight_caps.max_input_signal =
3428                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3429         }
3430 #else
3431         if (dm->backlight_caps.aux_support)
3432                 return;
3433
3434         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3435         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3436 #endif
3437 }
3438
3439 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3440                                 unsigned int *min, unsigned int *max)
3441 {
3442         if (!caps)
3443                 return 0;
3444
3445         if (caps->aux_support) {
3446                 // Firmware limits are in nits, DC API wants millinits.
3447                 *max = 1000 * caps->aux_max_input_signal;
3448                 *min = 1000 * caps->aux_min_input_signal;
3449         } else {
3450                 // Firmware limits are 8-bit, PWM control is 16-bit.
3451                 *max = 0x101 * caps->max_input_signal;
3452                 *min = 0x101 * caps->min_input_signal;
3453         }
3454         return 1;
3455 }
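
/*
 * Worked example using the defaults defined above (illustrative): with
 * PWM control, min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT (12)
 * and max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT (255) yield
 * *min = 0x101 * 12 = 3084 and *max = 0x101 * 255 = 0xFFFF; the 0x101
 * multiplier expands the 8-bit firmware limit to the full 16-bit PWM
 * range, since 0x101 * 0xFF == 0xFFFF.
 */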
3456
3457 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3458                                         uint32_t brightness)
3459 {
3460         unsigned int min, max;
3461
3462         if (!get_brightness_range(caps, &min, &max))
3463                 return brightness;
3464
3465         // Rescale 0..255 to min..max
3466         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3467                                        AMDGPU_MAX_BL_LEVEL);
3468 }
3469
3470 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3471                                       uint32_t brightness)
3472 {
3473         unsigned int min, max;
3474
3475         if (!get_brightness_range(caps, &min, &max))
3476                 return brightness;
3477
3478         if (brightness < min)
3479                 return 0;
3480         // Rescale min..max to 0..255
3481         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3482                                  max - min);
3483 }
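
/*
 * Illustrative mapping, assuming AMDGPU_MAX_BL_LEVEL is 0xFF (255): a
 * user value of 0 maps to min, 255 maps to max, and values in between
 * scale linearly, e.g. 128 lands at roughly min + (max - min) / 2.
 * convert_brightness_to_user() is the inverse, clamping anything below
 * min to 0.
 */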
3484
3485 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3486                                          u32 user_brightness)
3487 {
3488         struct amdgpu_dm_backlight_caps caps;
3489         struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3490         u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
3491         bool rc = false;
3492         int i;
3493
3494         amdgpu_dm_update_backlight_caps(dm);
3495         caps = dm->backlight_caps;
3496
3497         for (i = 0; i < dm->num_of_edps; i++) {
3498                 dm->brightness[i] = user_brightness;
3499                 brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
3500                 link[i] = (struct dc_link *)dm->backlight_link[i];
3501         }
3502
3503         /* Change brightness based on AUX property */
3504         if (caps.aux_support) {
3505                 for (i = 0; i < dm->num_of_edps; i++) {
3506                         rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
3507                                 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3508                         if (!rc) {
3509                                 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3510                                 break;
3511                         }
3512                 }
3513         } else {
3514                 for (i = 0; i < dm->num_of_edps; i++) {
3515                         rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
3516                         if (!rc) {
3517                                 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", i);
3518                                 break;
3519                         }
3520                 }
3521         }
3522
3523         return rc ? 0 : 1;
3524 }
3525
3526 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3527 {
3528         struct amdgpu_display_manager *dm = bl_get_data(bd);
3529
3530         amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
3531
3532         return 0;
3533 }
3534
3535 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
3536 {
3537         struct amdgpu_dm_backlight_caps caps;
3538
3539         amdgpu_dm_update_backlight_caps(dm);
3540         caps = dm->backlight_caps;
3541
3542         if (caps.aux_support) {
3543                 struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3544                 u32 avg, peak;
3545                 bool rc;
3546
3547                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3548                 if (!rc)
3549                         return dm->brightness[0];
3550                 return convert_brightness_to_user(&caps, avg);
3551         } else {
3552                 int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3553
3554                 if (ret == DC_ERROR_UNEXPECTED)
3555                         return dm->brightness[0];
3556                 return convert_brightness_to_user(&caps, ret);
3557         }
3558 }
3559
3560 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3561 {
3562         struct amdgpu_display_manager *dm = bl_get_data(bd);
3563
3564         return amdgpu_dm_backlight_get_level(dm);
3565 }
3566
3567 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3568         .options = BL_CORE_SUSPENDRESUME,
3569         .get_brightness = amdgpu_dm_backlight_get_brightness,
3570         .update_status  = amdgpu_dm_backlight_update_status,
3571 };
3572
3573 static void
3574 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3575 {
3576         char bl_name[16];
3577         struct backlight_properties props = { 0 };
3578         int i;
3579
3580         amdgpu_dm_update_backlight_caps(dm);
3581         for (i = 0; i < dm->num_of_edps; i++)
3582                 dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
3583
3584         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3585         props.brightness = AMDGPU_MAX_BL_LEVEL;
3586         props.type = BACKLIGHT_RAW;
3587
3588         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3589                  adev_to_drm(dm->adev)->primary->index);
3590
3591         dm->backlight_dev = backlight_device_register(bl_name,
3592                                                       adev_to_drm(dm->adev)->dev,
3593                                                       dm,
3594                                                       &amdgpu_dm_backlight_ops,
3595                                                       &props);
3596
3597         if (IS_ERR(dm->backlight_dev))
3598                 DRM_ERROR("DM: Backlight registration failed!\n");
3599         else
3600                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3601 }
3602
3603 #endif
3604
3605 static int initialize_plane(struct amdgpu_display_manager *dm,
3606                             struct amdgpu_mode_info *mode_info, int plane_id,
3607                             enum drm_plane_type plane_type,
3608                             const struct dc_plane_cap *plane_cap)
3609 {
3610         struct drm_plane *plane;
3611         unsigned long possible_crtcs;
3612         int ret = 0;
3613
3614         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3615         if (!plane) {
3616                 DRM_ERROR("KMS: Failed to allocate plane\n");
3617                 return -ENOMEM;
3618         }
3619         plane->type = plane_type;
3620
3621         /*
3622          * HACK: IGT tests expect that the primary plane for a CRTC
3623          * can only have one possible CRTC. Only expose support for
3624          * any CRTC on planes that will not serve as a primary plane
3625          * for a CRTC - i.e. overlay or underlay planes; see example below.
3626          */
3627         possible_crtcs = 1 << plane_id;
3628         if (plane_id >= dm->dc->caps.max_streams)
3629                 possible_crtcs = 0xff;
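
        /*
         * Illustrative example: with max_streams == 4, primary plane 2
         * advertises possible_crtcs == 0b0100 (CRTC 2 only), while an
         * overlay plane with plane_id == 4 advertises 0xff, i.e. any of
         * the first eight CRTCs.
         */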
3630
3631         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3632
3633         if (ret) {
3634                 DRM_ERROR("KMS: Failed to initialize plane\n");
3635                 kfree(plane);
3636                 return ret;
3637         }
3638
3639         if (mode_info)
3640                 mode_info->planes[plane_id] = plane;
3641
3642         return ret;
3643 }
3644
3645
3646 static void register_backlight_device(struct amdgpu_display_manager *dm,
3647                                       struct dc_link *link)
3648 {
3649 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3650         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3651
3652         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3653             link->type != dc_connection_none) {
3654                 /*
3655                  * Even if registration fails, we should continue with
3656                  * DM initialization, because not having backlight control
3657                  * is better than a black screen.
3658                  */
3659                 if (!dm->backlight_dev)
3660                         amdgpu_dm_register_backlight_device(dm);
3661
3662                 if (dm->backlight_dev) {
3663                         dm->backlight_link[dm->num_of_edps] = link;
3664                         dm->num_of_edps++;
3665                 }
3666         }
3667 #endif
3668 }
3669
3670
3671 /*
3672  * In this architecture, the association
3673  * connector -> encoder -> crtc
3674  * is not really required. The crtc and connector will hold the
3675  * display_index as an abstraction to use with the DAL component.
3676  *
3677  * Returns 0 on success
3678  */
3679 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3680 {
3681         struct amdgpu_display_manager *dm = &adev->dm;
3682         int32_t i;
3683         struct amdgpu_dm_connector *aconnector = NULL;
3684         struct amdgpu_encoder *aencoder = NULL;
3685         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3686         uint32_t link_cnt;
3687         int32_t primary_planes;
3688         enum dc_connection_type new_connection_type = dc_connection_none;
3689         const struct dc_plane_cap *plane;
3690
3691         dm->display_indexes_num = dm->dc->caps.max_streams;
3692         /* Update the actual used number of crtc */
3693         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3694
3695         link_cnt = dm->dc->caps.max_links;
3696         if (amdgpu_dm_mode_config_init(dm->adev)) {
3697                 DRM_ERROR("DM: Failed to initialize mode config\n");
3698                 return -EINVAL;
3699         }
3700
3701         /* There is one primary plane per CRTC */
3702         primary_planes = dm->dc->caps.max_streams;
3703         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3704
3705         /*
3706          * Initialize primary planes, implicit planes for legacy IOCTLs.
3707          * Order is reversed to match iteration order in atomic check.
3708          */
3709         for (i = (primary_planes - 1); i >= 0; i--) {
3710                 plane = &dm->dc->caps.planes[i];
3711
3712                 if (initialize_plane(dm, mode_info, i,
3713                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3714                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3715                         goto fail;
3716                 }
3717         }
3718
3719         /*
3720          * Initialize overlay planes, index starting after primary planes.
3721          * These planes have a higher DRM index than the primary planes since
3722          * they should be considered as having a higher z-order.
3723          * Order is reversed to match iteration order in atomic check.
3724          *
3725          * Only support DCN for now, and only expose one so we don't encourage
3726          * userspace to use up all the pipes.
3727          */
3728         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3729                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3730
3731                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3732                         continue;
3733
3734                 if (!plane->blends_with_above || !plane->blends_with_below)
3735                         continue;
3736
3737                 if (!plane->pixel_format_support.argb8888)
3738                         continue;
3739
3740                 if (initialize_plane(dm, NULL, primary_planes + i,
3741                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3742                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3743                         goto fail;
3744                 }
3745
3746                 /* Only create one overlay plane. */
3747                 break;
3748         }
3749
3750         for (i = 0; i < dm->dc->caps.max_streams; i++)
3751                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3752                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3753                         goto fail;
3754                 }
3755
3756 #if defined(CONFIG_DRM_AMD_DC_DCN)
3757         /* Use Outbox interrupt */
3758         switch (adev->asic_type) {
3759         case CHIP_SIENNA_CICHLID:
3760         case CHIP_NAVY_FLOUNDER:
3761         case CHIP_RENOIR:
3762                 if (register_outbox_irq_handlers(dm->adev)) {
3763                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3764                         goto fail;
3765                 }
3766                 break;
3767         default:
3768                 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3769         }
3770 #endif
3771
3772         /* loops over all connectors on the board */
3773         for (i = 0; i < link_cnt; i++) {
3774                 struct dc_link *link = NULL;
3775
3776                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3777                         DRM_ERROR(
3778                                 "KMS: Cannot support more than %d display indexes\n",
3779                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3780                         continue;
3781                 }
3782
3783                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3784                 if (!aconnector)
3785                         goto fail;
3786
3787                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3788                 if (!aencoder)
3789                         goto fail;
3790
3791                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3792                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3793                         goto fail;
3794                 }
3795
3796                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3797                         DRM_ERROR("KMS: Failed to initialize connector\n");
3798                         goto fail;
3799                 }
3800
3801                 link = dc_get_link_at_index(dm->dc, i);
3802
3803                 if (!dc_link_detect_sink(link, &new_connection_type))
3804                         DRM_ERROR("KMS: Failed to detect connector\n");
3805
3806                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3807                         emulated_link_detect(link);
3808                         amdgpu_dm_update_connector_after_detect(aconnector);
3809
3810                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3811                         amdgpu_dm_update_connector_after_detect(aconnector);
3812                         register_backlight_device(dm, link);
3813                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3814                                 amdgpu_dm_set_psr_caps(link);
3815                 }
3816
3817
3818         }
3819
3820         /* Software is initialized. Now we can register interrupt handlers. */
3821         switch (adev->asic_type) {
3822 #if defined(CONFIG_DRM_AMD_DC_SI)
3823         case CHIP_TAHITI:
3824         case CHIP_PITCAIRN:
3825         case CHIP_VERDE:
3826         case CHIP_OLAND:
3827                 if (dce60_register_irq_handlers(dm->adev)) {
3828                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3829                         goto fail;
3830                 }
3831                 break;
3832 #endif
3833         case CHIP_BONAIRE:
3834         case CHIP_HAWAII:
3835         case CHIP_KAVERI:
3836         case CHIP_KABINI:
3837         case CHIP_MULLINS:
3838         case CHIP_TONGA:
3839         case CHIP_FIJI:
3840         case CHIP_CARRIZO:
3841         case CHIP_STONEY:
3842         case CHIP_POLARIS11:
3843         case CHIP_POLARIS10:
3844         case CHIP_POLARIS12:
3845         case CHIP_VEGAM:
3846         case CHIP_VEGA10:
3847         case CHIP_VEGA12:
3848         case CHIP_VEGA20:
3849                 if (dce110_register_irq_handlers(dm->adev)) {
3850                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3851                         goto fail;
3852                 }
3853                 break;
3854 #if defined(CONFIG_DRM_AMD_DC_DCN)
3855         case CHIP_RAVEN:
3856         case CHIP_NAVI12:
3857         case CHIP_NAVI10:
3858         case CHIP_NAVI14:
3859         case CHIP_RENOIR:
3860         case CHIP_SIENNA_CICHLID:
3861         case CHIP_NAVY_FLOUNDER:
3862         case CHIP_DIMGREY_CAVEFISH:
3863         case CHIP_BEIGE_GOBY:
3864         case CHIP_VANGOGH:
3865                 if (dcn10_register_irq_handlers(dm->adev)) {
3866                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3867                         goto fail;
3868                 }
3869                 break;
3870 #endif
3871         default:
3872                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3873                 goto fail;
3874         }
3875
3876         return 0;
3877 fail:
3878         kfree(aencoder);
3879         kfree(aconnector);
3880
3881         return -EINVAL;
3882 }
3883
3884 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3885 {
3886         drm_mode_config_cleanup(dm->ddev);
3887         drm_atomic_private_obj_fini(&dm->atomic_obj);
3889 }
3890
3891 /******************************************************************************
3892  * amdgpu_display_funcs functions
3893  *****************************************************************************/
3894
3895 /*
3896  * dm_bandwidth_update - program display watermarks
3897  *
3898  * @adev: amdgpu_device pointer
3899  *
3900  * Calculate and program the display watermarks and line buffer allocation.
3901  */
3902 static void dm_bandwidth_update(struct amdgpu_device *adev)
3903 {
3904         /* TODO: implement later */
3905 }
3906
3907 static const struct amdgpu_display_funcs dm_display_funcs = {
3908         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3909         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3910         .backlight_set_level = NULL, /* never called for DC */
3911         .backlight_get_level = NULL, /* never called for DC */
3912         .hpd_sense = NULL,/* called unconditionally */
3913         .hpd_set_polarity = NULL, /* called unconditionally */
3914         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3915         .page_flip_get_scanoutpos =
3916                 dm_crtc_get_scanoutpos,/* called unconditionally */
3917         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3918         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3919 };
3920
3921 #if defined(CONFIG_DEBUG_KERNEL_DC)
3922
3923 static ssize_t s3_debug_store(struct device *device,
3924                               struct device_attribute *attr,
3925                               const char *buf,
3926                               size_t count)
3927 {
3928         int ret;
3929         int s3_state;
3930         struct drm_device *drm_dev = dev_get_drvdata(device);
3931         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3932
3933         ret = kstrtoint(buf, 0, &s3_state);
3934
3935         if (ret == 0) {
3936                 if (s3_state) {
3937                         dm_resume(adev);
3938                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3939                 } else {
3940                         dm_suspend(adev);
                }
3941         }
3942
3943         return ret == 0 ? count : ret;
3944 }
3945
3946 DEVICE_ATTR_WO(s3_debug);
3947
3948 #endif
3949
3950 static int dm_early_init(void *handle)
3951 {
3952         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3953
3954         switch (adev->asic_type) {
3955 #if defined(CONFIG_DRM_AMD_DC_SI)
3956         case CHIP_TAHITI:
3957         case CHIP_PITCAIRN:
3958         case CHIP_VERDE:
3959                 adev->mode_info.num_crtc = 6;
3960                 adev->mode_info.num_hpd = 6;
3961                 adev->mode_info.num_dig = 6;
3962                 break;
3963         case CHIP_OLAND:
3964                 adev->mode_info.num_crtc = 2;
3965                 adev->mode_info.num_hpd = 2;
3966                 adev->mode_info.num_dig = 2;
3967                 break;
3968 #endif
3969         case CHIP_BONAIRE:
3970         case CHIP_HAWAII:
3971                 adev->mode_info.num_crtc = 6;
3972                 adev->mode_info.num_hpd = 6;
3973                 adev->mode_info.num_dig = 6;
3974                 break;
3975         case CHIP_KAVERI:
3976                 adev->mode_info.num_crtc = 4;
3977                 adev->mode_info.num_hpd = 6;
3978                 adev->mode_info.num_dig = 7;
3979                 break;
3980         case CHIP_KABINI:
3981         case CHIP_MULLINS:
3982                 adev->mode_info.num_crtc = 2;
3983                 adev->mode_info.num_hpd = 6;
3984                 adev->mode_info.num_dig = 6;
3985                 break;
3986         case CHIP_FIJI:
3987         case CHIP_TONGA:
3988                 adev->mode_info.num_crtc = 6;
3989                 adev->mode_info.num_hpd = 6;
3990                 adev->mode_info.num_dig = 7;
3991                 break;
3992         case CHIP_CARRIZO:
3993                 adev->mode_info.num_crtc = 3;
3994                 adev->mode_info.num_hpd = 6;
3995                 adev->mode_info.num_dig = 9;
3996                 break;
3997         case CHIP_STONEY:
3998                 adev->mode_info.num_crtc = 2;
3999                 adev->mode_info.num_hpd = 6;
4000                 adev->mode_info.num_dig = 9;
4001                 break;
4002         case CHIP_POLARIS11:
4003         case CHIP_POLARIS12:
4004                 adev->mode_info.num_crtc = 5;
4005                 adev->mode_info.num_hpd = 5;
4006                 adev->mode_info.num_dig = 5;
4007                 break;
4008         case CHIP_POLARIS10:
4009         case CHIP_VEGAM:
4010                 adev->mode_info.num_crtc = 6;
4011                 adev->mode_info.num_hpd = 6;
4012                 adev->mode_info.num_dig = 6;
4013                 break;
4014         case CHIP_VEGA10:
4015         case CHIP_VEGA12:
4016         case CHIP_VEGA20:
4017                 adev->mode_info.num_crtc = 6;
4018                 adev->mode_info.num_hpd = 6;
4019                 adev->mode_info.num_dig = 6;
4020                 break;
4021 #if defined(CONFIG_DRM_AMD_DC_DCN)
4022         case CHIP_RAVEN:
4023         case CHIP_RENOIR:
4024         case CHIP_VANGOGH:
4025                 adev->mode_info.num_crtc = 4;
4026                 adev->mode_info.num_hpd = 4;
4027                 adev->mode_info.num_dig = 4;
4028                 break;
4029         case CHIP_NAVI10:
4030         case CHIP_NAVI12:
4031         case CHIP_SIENNA_CICHLID:
4032         case CHIP_NAVY_FLOUNDER:
4033                 adev->mode_info.num_crtc = 6;
4034                 adev->mode_info.num_hpd = 6;
4035                 adev->mode_info.num_dig = 6;
4036                 break;
4037         case CHIP_NAVI14:
4038         case CHIP_DIMGREY_CAVEFISH:
4039                 adev->mode_info.num_crtc = 5;
4040                 adev->mode_info.num_hpd = 5;
4041                 adev->mode_info.num_dig = 5;
4042                 break;
4043         case CHIP_BEIGE_GOBY:
4044                 adev->mode_info.num_crtc = 2;
4045                 adev->mode_info.num_hpd = 2;
4046                 adev->mode_info.num_dig = 2;
4047                 break;
4048 #endif
4049         default:
4050                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4051                 return -EINVAL;
4052         }
4053
4054         amdgpu_dm_set_irq_funcs(adev);
4055
4056         if (adev->mode_info.funcs == NULL)
4057                 adev->mode_info.funcs = &dm_display_funcs;
4058
4059         /*
4060          * Note: Do NOT change adev->audio_endpt_rreg and
4061          * adev->audio_endpt_wreg because they are initialised in
4062          * amdgpu_device_init()
4063          */
4064 #if defined(CONFIG_DEBUG_KERNEL_DC)
4065         device_create_file(
4066                 adev_to_drm(adev)->dev,
4067                 &dev_attr_s3_debug);
4068 #endif
4069
4070         return 0;
4071 }
4072
4073 static bool modeset_required(struct drm_crtc_state *crtc_state,
4074                              struct dc_stream_state *new_stream,
4075                              struct dc_stream_state *old_stream)
4076 {
4077         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4078 }
4079
4080 static bool modereset_required(struct drm_crtc_state *crtc_state)
4081 {
4082         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4083 }
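
/*
 * Taken together (a restatement of the two predicates above):
 * modeset_required() is true when a modeset leaves the CRTC active,
 * modereset_required() when a modeset leaves it inactive; a commit that
 * needs no modeset satisfies neither.
 */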
4084
4085 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4086 {
4087         drm_encoder_cleanup(encoder);
4088         kfree(encoder);
4089 }
4090
4091 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4092         .destroy = amdgpu_dm_encoder_destroy,
4093 };
4094
4095
4096 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4097                                          struct drm_framebuffer *fb,
4098                                          int *min_downscale, int *max_upscale)
4099 {
4100         struct amdgpu_device *adev = drm_to_adev(dev);
4101         struct dc *dc = adev->dm.dc;
4102         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4103         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4104
4105         switch (fb->format->format) {
4106         case DRM_FORMAT_P010:
4107         case DRM_FORMAT_NV12:
4108         case DRM_FORMAT_NV21:
4109                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4110                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4111                 break;
4112
4113         case DRM_FORMAT_XRGB16161616F:
4114         case DRM_FORMAT_ARGB16161616F:
4115         case DRM_FORMAT_XBGR16161616F:
4116         case DRM_FORMAT_ABGR16161616F:
4117                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4118                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4119                 break;
4120
4121         default:
4122                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4123                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4124                 break;
4125         }
4126
4127         /*
4128          * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4129          * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4130          */
4131         if (*max_upscale == 1)
4132                 *max_upscale = 1000;
4133
4134         if (*min_downscale == 1)
4135                 *min_downscale = 1000;
4136 }
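
/*
 * Example of the 1000-based units used here (illustrative): a returned
 * *min_downscale of 250 allows shrinking to 0.25x and a *max_upscale of
 * 16000 allows growing to 16x, matching the fallback limits used by
 * fill_dc_scaling_info() below when no plane caps are available.
 */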
4137
4138
4139 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4140                                 struct dc_scaling_info *scaling_info)
4141 {
4142         int scale_w, scale_h, min_downscale, max_upscale;
4143
4144         memset(scaling_info, 0, sizeof(*scaling_info));
4145
4146         /* Source is fixed-point 16.16; we ignore the fractional part for now. */
4147         scaling_info->src_rect.x = state->src_x >> 16;
4148         scaling_info->src_rect.y = state->src_y >> 16;
4149
4150         /*
4151          * For reasons we don't (yet) fully understand a non-zero
4152          * src_y coordinate into an NV12 buffer can cause a
4153          * system hang. To avoid hangs (and maybe be overly cautious)
4154          * let's reject both non-zero src_x and src_y.
4155          *
4156          * We currently know of only one use-case to reproduce a
4157          * scenario with non-zero src_x and src_y for NV12, which
4158          * is to gesture the YouTube Android app into full screen
4159          * on ChromeOS.
4160          */
4161         if (state->fb &&
4162             state->fb->format->format == DRM_FORMAT_NV12 &&
4163             (scaling_info->src_rect.x != 0 ||
4164              scaling_info->src_rect.y != 0))
4165                 return -EINVAL;
4166
4167         scaling_info->src_rect.width = state->src_w >> 16;
4168         if (scaling_info->src_rect.width == 0)
4169                 return -EINVAL;
4170
4171         scaling_info->src_rect.height = state->src_h >> 16;
4172         if (scaling_info->src_rect.height == 0)
4173                 return -EINVAL;
4174
4175         scaling_info->dst_rect.x = state->crtc_x;
4176         scaling_info->dst_rect.y = state->crtc_y;
4177
4178         if (state->crtc_w == 0)
4179                 return -EINVAL;
4180
4181         scaling_info->dst_rect.width = state->crtc_w;
4182
4183         if (state->crtc_h == 0)
4184                 return -EINVAL;
4185
4186         scaling_info->dst_rect.height = state->crtc_h;
4187
4188         /* DRM doesn't specify clipping on destination output. */
4189         scaling_info->clip_rect = scaling_info->dst_rect;
4190
4191         /* Validate scaling per-format with DC plane caps */
4192         if (state->plane && state->plane->dev && state->fb) {
4193                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4194                                              &min_downscale, &max_upscale);
4195         } else {
4196                 min_downscale = 250;
4197                 max_upscale = 16000;
4198         }
4199
4200         scale_w = scaling_info->dst_rect.width * 1000 /
4201                   scaling_info->src_rect.width;
4202
4203         if (scale_w < min_downscale || scale_w > max_upscale)
4204                 return -EINVAL;
4205
4206         scale_h = scaling_info->dst_rect.height * 1000 /
4207                   scaling_info->src_rect.height;
4208
4209         if (scale_h < min_downscale || scale_h > max_upscale)
4210                 return -EINVAL;
4211
4212         /*
4213          * The "scaling_quality" can be ignored for now, quality = 0 has DC
4214          * assume reasonable defaults based on the format.
4215          */
4216
4217         return 0;
4218 }
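
/*
 * Worked example for the checks above (illustrative numbers): scaling a
 * 960x540 source rect onto a 1920x1080 destination gives
 * scale_w = 1920 * 1000 / 960 = 2000 and scale_h = 1080 * 1000 / 540 = 2000,
 * i.e. a 2.0x upscale, comfortably inside the fallback bounds of
 * min_downscale = 250 and max_upscale = 16000.
 */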

static void
fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
				 uint64_t tiling_flags)
{
	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}
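
/*
 * Note: AMDGPU_TILING_GET() just extracts one named bitfield from the packed
 * 64-bit tiling_flags word, so the helper above is only unpacking the
 * pre-GFX9 tiling description stored with the BO into DC's dc_tiling_info
 * layout, field by field.
 */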

static void
fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
				  union dc_tiling_info *tiling_info)
{
	tiling_info->gfx9.num_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;
	tiling_info->gfx9.num_banks =
		adev->gfx.config.gb_addr_config_fields.num_banks;
	tiling_info->gfx9.pipe_interleave =
		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
	tiling_info->gfx9.num_shader_engines =
		adev->gfx.config.gb_addr_config_fields.num_se;
	tiling_info->gfx9.max_compressed_frags =
		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
	tiling_info->gfx9.num_rb_per_se =
		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
	tiling_info->gfx9.shaderEnable = 1;
	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
	    adev->asic_type == CHIP_BEIGE_GOBY ||
	    adev->asic_type == CHIP_VANGOGH)
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}

static int
validate_dcc(struct amdgpu_device *adev,
	     const enum surface_pixel_format format,
	     const enum dc_rotation_angle rotation,
	     const union dc_tiling_info *tiling_info,
	     const struct dc_plane_dcc_param *dcc,
	     const struct dc_plane_address *address,
	     const struct plane_size *plane_size)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (!dcc->enable)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
	    !dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (dcc->independent_64b_blks == 0 &&
	    output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	return 0;
}
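
/*
 * In short: DC reports, per surface size/format/swizzle, whether DCC is
 * usable at all and whether it needs independently decompressible 64B
 * blocks. A framebuffer whose modifier lacks DCC_INDEPENDENT_64B while the
 * hardware requires it for this surface would scan out garbage, so it is
 * rejected here with -EINVAL instead.
 */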

static bool
modifier_has_dcc(uint64_t modifier)
{
	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}

static unsigned
modifier_gfx9_swizzle_mode(uint64_t modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return 0;

	return AMD_FMT_MOD_GET(TILE, modifier);
}
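
/*
 * Example (hedged): for a typical GFX9 "S_X" modifier the TILE field holds
 * AMD_FMT_MOD_TILE_GFX9_64K_S_X, and that same value is what DC expects in
 * tiling_info->gfx9.swizzle, which is why the helper above can return the
 * extracted field directly. Linear buffers carry no TILE field at all,
 * hence the explicit 0 for DRM_FORMAT_MOD_LINEAR.
 */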

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}

static void
fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
				    union dc_tiling_info *tiling_info,
				    uint64_t modifier)
{
	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);

	fill_gfx9_tiling_info_from_device(adev, tiling_info);

	if (!IS_AMD_FMT_MOD(modifier))
		return;

	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);

	if (adev->family >= AMDGPU_FAMILY_NV) {
		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
	} else {
		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;

		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
	}
}
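
/*
 * Worked example (illustrative): with mod_pipe_xor_bits == 6 the helper
 * above computes pipes_log2 = min(4, 6) = 4, so num_pipes = 1 << 4 = 16 and
 * num_shader_engines = 1 << (6 - 4) = 4; any pipe XOR bits beyond four are
 * re-expressed as shader-engine bits in the form DC wants.
 */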

enum dm_micro_swizzle {
	MICRO_SWIZZLE_Z = 0,
	MICRO_SWIZZLE_S = 1,
	MICRO_SWIZZLE_D = 2,
	MICRO_SWIZZLE_R = 3
};

static bool dm_plane_format_mod_supported(struct drm_plane *plane,
					  uint32_t format,
					  uint64_t modifier)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	const struct drm_format_info *info = drm_format_info(format);
	int i;

	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;

	if (!info)
		return false;

	/*
	 * We always have to allow these modifiers:
	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
	 */
	if (modifier == DRM_FORMAT_MOD_LINEAR ||
	    modifier == DRM_FORMAT_MOD_INVALID) {
		return true;
	}

	/* Check that the modifier is on the list of the plane's supported modifiers. */
	for (i = 0; i < plane->modifier_count; i++) {
		if (modifier == plane->modifiers[i])
			break;
	}
	if (i == plane->modifier_count)
		return false;

	/*
	 * For D swizzle the canonical modifier depends on the bpp, so check
	 * it here.
	 */
	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
	    adev->family >= AMDGPU_FAMILY_NV) {
		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
			return false;
	}

	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
	    info->cpp[0] < 8)
		return false;

	if (modifier_has_dcc(modifier)) {
		/* Per radeonsi comments 16/64 bpp are more complicated. */
		if (info->cpp[0] != 4)
			return false;
		/*
		 * We support multi-planar formats, but not when combined with
		 * additional DCC metadata planes.
		 */
		if (info->num_planes > 1)
			return false;
	}

	return true;
}
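
/*
 * Putting the checks above together (sketch): XRGB8888 (cpp[0] == 4, one
 * plane) with a GFX9 64K_S_X + DCC modifier passes, while the same modifier
 * on P010 fails twice over, since cpp[0] != 4 and num_planes > 1. LINEAR
 * and INVALID always pass because core DRM relies on them as fallbacks.
 */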

static void
add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
{
	if (!*mods)
		return;

	if (*cap - *size < 1) {
		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);

		if (!new_mods) {
			kfree(*mods);
			*mods = NULL;
			return;
		}

		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
		kfree(*mods);
		*mods = new_mods;
		*cap = new_cap;
	}

	(*mods)[*size] = mod;
	*size += 1;
}
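
/*
 * Design note: add_modifier() grows the array geometrically (doubling) and
 * degrades gracefully on allocation failure by freeing the list and leaving
 * *mods NULL; every caller funnels through get_plane_modifiers(), which
 * turns that NULL into -ENOMEM at the end.
 */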

static void
add_gfx9_modifiers(const struct amdgpu_device *adev,
		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pipe_xor_bits = min(8, pipes +
				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
	int bank_xor_bits = min(8 - pipe_xor_bits,
				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);

	if (adev->family == AMDGPU_FAMILY_RV) {
		/* Raven2 and later */
		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

		/*
		 * No _D DCC swizzles yet because we only allow 32bpp, which
		 * doesn't support _D on DCN
		 */

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				    AMD_FMT_MOD_SET(RB, rb) |
				    AMD_FMT_MOD_SET(PIPE, pipes));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
			    AMD_FMT_MOD_SET(RB, rb) |
			    AMD_FMT_MOD_SET(PIPE, pipes));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
	}
}
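
/*
 * Note on DCC_CONSTANT_ENCODE above: original Raven cannot do constant
 * encoding, so each DCC layout is advertised in both flavors and the
 * CONSTANT_ENCODE = 1 variants are gated on has_constant_encode (Raven2 and
 * newer, i.e. external_rev_id >= 0x81 or a later asic_type).
 */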

static void
add_gfx10_1_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

static void
add_gfx10_3_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
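
/*
 * The GFX10.3 (RB+) list above differs from the GFX10.1 one in two ways:
 * the swizzles are versioned as GFX10_RBPLUS and carry a PACKERS field, and
 * the DCC modifiers additionally set DCC_INDEPENDENT_128B, matching what
 * the display hardware on those parts can decompress.
 */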

static int
get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
{
	uint64_t size = 0, capacity = 128;

	*mods = NULL;

	/* We have not hooked up any pre-GFX9 modifiers. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return 0;

	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);

	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
		return *mods ? 0 : -ENOMEM;
	}

	switch (adev->family) {
	case AMDGPU_FAMILY_AI:
	case AMDGPU_FAMILY_RV:
		add_gfx9_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_NV:
	case AMDGPU_FAMILY_VGH:
		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
		else
			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
		break;
	}

	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);

	/* INVALID marks the end of the list. */
	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);

	if (!*mods)
		return -ENOMEM;

	return 0;
}
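
/*
 * Usage sketch: the list built here is handed to drm_universal_plane_init()
 * when the plane is created and ends up in the IN_FORMATS blob exposed to
 * userspace; DRM_FORMAT_MOD_INVALID must stay last because it terminates
 * the array for the DRM core.
 */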

static int
fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
					  const struct amdgpu_framebuffer *afb,
					  const enum surface_pixel_format format,
					  const enum dc_rotation_angle rotation,
					  const struct plane_size *plane_size,
					  union dc_tiling_info *tiling_info,
					  struct dc_plane_dcc_param *dcc,
					  struct dc_plane_address *address,
					  const bool force_disable_dcc)
{
	const uint64_t modifier = afb->base.modifier;
	int ret;

	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);

	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
		uint64_t dcc_address = afb->address + afb->base.offsets[1];

		dcc->enable = 1;
		dcc->meta_pitch = afb->base.pitches[1];
		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);

		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
	}

	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
	if (ret)
		return ret;

	return 0;
}
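
/*
 * Layout assumption made explicit above: for a DCC-enabled AMD modifier the
 * framebuffer's plane 1 is the DCC metadata surface, so offsets[1] and
 * pitches[1] give the metadata address and pitch that DC programs alongside
 * the main surface.
 */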

static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		uint64_t addr = afb->address + fb->offsets[0];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(addr);
		address->grph.addr.high_part = upper_32_bits(addr);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t luma_addr = afb->address + fb->offsets[0];
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(luma_addr);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(luma_addr);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	if (adev->family >= AMDGPU_FAMILY_AI) {
		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
								rotation, plane_size,
								tiling_info, dcc,
								address,
								force_disable_dcc);
		if (ret)
			return ret;
	} else {
		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
	}

	return 0;
}
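
/*
 * Example for the video path above (hedged, NV12): a 1920x1080 NV12
 * framebuffer gets a 1920-pixel luma pitch from fb->pitches[0] and a
 * 960x540 chroma size from the width/2 and height/2 defaults, exactly the
 * 4:2:0 subsampling NV12 implies; formats with other subsampling would need
 * the TODO above resolved first.
 */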

static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *global_alpha,
			       int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}
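
/*
 * Worked example: DRM plane alpha is 16-bit while DC's global alpha is
 * 8-bit. A half-transparent overlay with alpha == 0x8000 is below 0xffff,
 * so *global_alpha becomes true and
 * *global_alpha_value = 0x8000 >> 8 = 0x80.
 */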

static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
			    const enum surface_pixel_format format,
			    enum dc_color_space *color_space)
{
	bool full_range;

	*color_space = COLOR_SPACE_SRGB;

	/* DRM color properties only affect non-RGB formats. */
	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);

	switch (plane_state->color_encoding) {
	case DRM_COLOR_YCBCR_BT601:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR601;
		else
			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT709:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR709;
		else
			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT2020:
		if (full_range)
			*color_space = COLOR_SPACE_2020_YCBCR;
		else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
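
/*
 * Example: an NV12 plane with DRM_COLOR_YCBCR_BT709 encoding and limited
 * range maps to COLOR_SPACE_YCBCR709_LIMITED above, while BT.2020 is only
 * accepted in full range; anything else is refused with -EINVAL rather
 * than guessed at.
 */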

static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %p4cc\n",
			&fb->format->format);
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}

static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	int ret;
	bool force_disable_dcc = false;

	ret = fill_dc_scaling_info(plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	ret = fill_dc_plane_info_and_addr(adev, plane_state,
					  afb->tiling_flags,
					  &plane_info,
					  &dc_plane_state->address,
					  afb->tmz_surface,
					  force_disable_dcc);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* Always 0 */
	dc_plane_state->flip_int_enabled = true;

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
		      dst.x, dst.y, dst.width, dst.height);
}
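
/*
 * Worked example for RMX_ASPECT above: a 1280x1024 (5:4) mode on a
 * 1920x1080 (16:9) timing has src.width * dst.height = 1280 * 1080, which
 * is less than src.height * dst.width = 1024 * 1920, so dst.width shrinks
 * to 1280 * 1080 / 1024 = 1350 and the image is centered at
 * dst.x = (1920 - 1350) / 2 = 285.
 */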

static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      bool is_y420, int requested_bpc)
{
	uint8_t bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix EDID parsing.
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}
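
/*
 * Example: a panel reporting 12 bpc with a requested_bpc of 10 yields
 * min(12, 10) = 10, already even, so COLOR_DEPTH_101010 is returned; a
 * request of 11 would first be rounded down to 10 by the bpc & 1 step
 * above.
 */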

static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27.03 MHz (270300 in the 100 Hz units used here) is the
		 * separation point between HDTV and SDTV according to the
		 * HDMI spec; we use YCbCr709 above it and YCbCr601 below.
		 */
		if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}
	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}

static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjust pix clock per the HDMI spec based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}
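
/*
 * Worked example (assuming a 600000 kHz max_tmds_clock): a 594 MHz 4k60
 * RGB mode has pix_clk_100hz = 5940000, so normalized_clk starts at
 * 594000. At COLOR_DEPTH_101010 that scales to 594000 * 30 / 24 = 742500,
 * which exceeds the TMDS limit, so the loop steps the depth down and
 * settles on COLOR_DEPTH_888 at 594000.
 */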

static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	if (is_freesync_video_mode(mode_in, aconnector)) {
		timing_out->h_addressable = mode_in->hdisplay;
		timing_out->h_total = mode_in->htotal;
		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
		timing_out->v_total = mode_in->vtotal;
		timing_out->v_addressable = mode_in->vdisplay;
		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
		timing_out->pix_clk_100hz = mode_in->clock * 10;
	} else {
		timing_out->h_addressable = mode_in->crtc_hdisplay;
		timing_out->h_total = mode_in->crtc_htotal;
		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
		timing_out->v_total = mode_in->crtc_vtotal;
		timing_out->v_addressable = mode_in->crtc_vdisplay;
		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	}

	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}
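
/*
 * Note on the is_freesync_video_mode() branch above: it reads the plain
 * hdisplay/htotal/... fields rather than the crtc_* copies, which other
 * paths (see decide_crtc_timing_for_drm_display_mode() below) may patch;
 * presumably freesync video switching needs the stream to keep the mode's
 * unadjusted timing.
 */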
5374
5375 static void fill_audio_info(struct audio_info *audio_info,
5376                             const struct drm_connector *drm_connector,
5377                             const struct dc_sink *dc_sink)
5378 {
5379         int i = 0;
5380         int cea_revision = 0;
5381         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5382
5383         audio_info->manufacture_id = edid_caps->manufacturer_id;
5384         audio_info->product_id = edid_caps->product_id;
5385
5386         cea_revision = drm_connector->display_info.cea_rev;
5387
5388         strscpy(audio_info->display_name,
5389                 edid_caps->display_name,
5390                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5391
5392         if (cea_revision >= 3) {
5393                 audio_info->mode_count = edid_caps->audio_mode_count;
5394
5395                 for (i = 0; i < audio_info->mode_count; ++i) {
5396                         audio_info->modes[i].format_code =
5397                                         (enum audio_format_code)
5398                                         (edid_caps->audio_modes[i].format_code);
5399                         audio_info->modes[i].channel_count =
5400                                         edid_caps->audio_modes[i].channel_count;
5401                         audio_info->modes[i].sample_rates.all =
5402                                         edid_caps->audio_modes[i].sample_rate;
5403                         audio_info->modes[i].sample_size =
5404                                         edid_caps->audio_modes[i].sample_size;
5405                 }
5406         }
5407
5408         audio_info->flags.all = edid_caps->speaker_flags;
5409
5410         /* TODO: We only check the progressive mode; check the interlace mode too */
5411         if (drm_connector->latency_present[0]) {
5412                 audio_info->video_latency = drm_connector->video_latency[0];
5413                 audio_info->audio_latency = drm_connector->audio_latency[0];
5414         }
5415
5416         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5417
5418 }
5419
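/*
 * Copy only the hardware (crtc_*) timing fields from src_mode to dst_mode,
 * leaving the logical mode (hdisplay/vdisplay/clock etc.) of the
 * destination untouched.
 */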
5420 static void
5421 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5422                                       struct drm_display_mode *dst_mode)
5423 {
5424         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5425         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5426         dst_mode->crtc_clock = src_mode->crtc_clock;
5427         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5428         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5429         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5430         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5431         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5432         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5433         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5434         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5435         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5436         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5437         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5438 }
5439
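/*
 * Reuse the native mode's CRTC timing when scaling is enabled, or when the
 * requested mode already matches the native clock and h/v totals (i.e. it
 * is effectively the same timing); otherwise leave the mode's own CRTC
 * timing in place.
 */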
5440 static void
5441 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5442                                         const struct drm_display_mode *native_mode,
5443                                         bool scale_enabled)
5444 {
5445         if (scale_enabled) {
5446                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5447         } else if (native_mode->clock == drm_mode->clock &&
5448                         native_mode->htotal == drm_mode->htotal &&
5449                         native_mode->vtotal == drm_mode->vtotal) {
5450                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5451         } else {
5452                 /* no scaling and no amdgpu-inserted timing; nothing to patch */
5453         }
5454 }
5455
5456 static struct dc_sink *
5457 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5458 {
5459         struct dc_sink_init_data sink_init_data = { 0 };
5460         struct dc_sink *sink = NULL;

5461         sink_init_data.link = aconnector->dc_link;
5462         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5463
5464         sink = dc_sink_create(&sink_init_data);
5465         if (!sink) {
5466                 DRM_ERROR("Failed to create sink!\n");
5467                 return NULL;
5468         }
5469         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5470
5471         return sink;
5472 }
5473
5474 static void set_multisync_trigger_params(
5475                 struct dc_stream_state *stream)
5476 {
5477         struct dc_stream_state *master = NULL;
5478
5479         if (stream->triggered_crtc_reset.enabled) {
5480                 master = stream->triggered_crtc_reset.event_source;
5481                 stream->triggered_crtc_reset.event =
5482                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5483                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5484                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5485         }
5486 }
5487
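/*
 * Pick the stream with the highest refresh rate as the trigger master for
 * all multisync-enabled streams. Refresh is derived from the timing as
 * pix_clk / (h_total * v_total); e.g. 1080p with a 148.5 MHz pixel clock
 * (pix_clk_100hz = 1485000) and 2200x1125 totals gives
 * 148500000 / 2475000 = 60 Hz.
 */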
5488 static void set_master_stream(struct dc_stream_state *stream_set[],
5489                               int stream_count)
5490 {
5491         int j, highest_rfr = 0, master_stream = 0;
5492
5493         for (j = 0;  j < stream_count; j++) {
5494                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5495                         int refresh_rate = 0;
5496
5497                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5498                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5499                         if (refresh_rate > highest_rfr) {
5500                                 highest_rfr = refresh_rate;
5501                                 master_stream = j;
5502                         }
5503                 }
5504         }
5505         for (j = 0;  j < stream_count; j++) {
5506                 if (stream_set[j])
5507                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5508         }
5509 }
5510
5511 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5512 {
5513         int i = 0;
5514         struct dc_stream_state *stream;
5515
5516         if (context->stream_count < 2)
5517                 return;
5518         for (i = 0; i < context->stream_count ; i++) {
5519                 if (!context->streams[i])
5520                         continue;
5521                 /*
5522                  * TODO: add a function to read AMD VSDB bits and set
5523                  * crtc_sync_master.multi_sync_enabled flag
5524                  * For now it's set to false
5525                  */
5526         }
5527
5528         set_master_stream(context->streams, context->stream_count);
5529
5530         for (i = 0; i < context->stream_count ; i++) {
5531                 stream = context->streams[i];
5532
5533                 if (!stream)
5534                         continue;
5535
5536                 set_multisync_trigger_params(stream);
5537         }
5538 }
5539
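/*
 * Return the mode with the highest refresh rate at the preferred mode's
 * resolution, scanning either the probed or the current mode list. The
 * result is cached in aconnector->freesync_vid_base so repeat lookups
 * (e.g. from is_freesync_video_mode()) are cheap.
 */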
5540 static struct drm_display_mode *
5541 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5542                           bool use_probed_modes)
5543 {
5544         struct drm_display_mode *m, *m_pref = NULL;
5545         u16 current_refresh, highest_refresh;
5546         struct list_head *list_head = use_probed_modes ?
5547                                                     &aconnector->base.probed_modes :
5548                                                     &aconnector->base.modes;
5549
5550         if (aconnector->freesync_vid_base.clock != 0)
5551                 return &aconnector->freesync_vid_base;
5552
5553         /* Find the preferred mode */
5554         list_for_each_entry(m, list_head, head) {
5555                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5556                         m_pref = m;
5557                         break;
5558                 }
5559         }
5560
5561         if (!m_pref) {
5562                 /* Probably an EDID with no preferred mode. Fall back to the first entry */
5563                 m_pref = list_first_entry_or_null(
5564                         &aconnector->base.modes, struct drm_display_mode, head);
5565                 if (!m_pref) {
5566                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5567                         return NULL;
5568                 }
5569         }
5570
5571         highest_refresh = drm_mode_vrefresh(m_pref);
5572
5573         /*
5574          * Find the mode with highest refresh rate with same resolution.
5575          * For some monitors, preferred mode is not the mode with highest
5576          * supported refresh rate.
5577          */
5578         list_for_each_entry(m, list_head, head) {
5579                 current_refresh  = drm_mode_vrefresh(m);
5580
5581                 if (m->hdisplay == m_pref->hdisplay &&
5582                     m->vdisplay == m_pref->vdisplay &&
5583                     highest_refresh < current_refresh) {
5584                         highest_refresh = current_refresh;
5585                         m_pref = m;
5586                 }
5587         }
5588
5589         aconnector->freesync_vid_base = *m_pref;
5590         return m_pref;
5591 }
5592
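/*
 * A freesync video mode is one derived from the base (highest-refresh)
 * mode purely by stretching the vertical blanking: same pixel clock,
 * identical horizontal timing, and vsync start/end offset by exactly the
 * vtotal delta. Any other difference disqualifies the mode.
 */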
5593 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5594                                    struct amdgpu_dm_connector *aconnector)
5595 {
5596         struct drm_display_mode *high_mode;
5597         int timing_diff;
5598
5599         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5600         if (!high_mode || !mode)
5601                 return false;
5602
5603         timing_diff = high_mode->vtotal - mode->vtotal;
5604
5605         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5606             high_mode->hdisplay != mode->hdisplay ||
5607             high_mode->vdisplay != mode->vdisplay ||
5608             high_mode->hsync_start != mode->hsync_start ||
5609             high_mode->hsync_end != mode->hsync_end ||
5610             high_mode->htotal != mode->htotal ||
5611             high_mode->hskew != mode->hskew ||
5612             high_mode->vscan != mode->vscan ||
5613             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5614             high_mode->vsync_end - mode->vsync_end != timing_diff)
5615                 return false;
5616         else
5617                 return true;
5618 }
5619
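/*
 * Build a dc_stream_state for the given connector and mode: pick a real or
 * fake sink, fill the timing (optionally recalculated for freesync video
 * modes), negotiate DSC from the decoder's DPCD caps on DP links, and
 * attach scaling, audio and info-packet state.
 */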
5620 static struct dc_stream_state *
5621 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5622                        const struct drm_display_mode *drm_mode,
5623                        const struct dm_connector_state *dm_state,
5624                        const struct dc_stream_state *old_stream,
5625                        int requested_bpc)
5626 {
5627         struct drm_display_mode *preferred_mode = NULL;
5628         struct drm_connector *drm_connector;
5629         const struct drm_connector_state *con_state =
5630                 dm_state ? &dm_state->base : NULL;
5631         struct dc_stream_state *stream = NULL;
5632         struct drm_display_mode mode = *drm_mode;
5633         struct drm_display_mode saved_mode;
5634         struct drm_display_mode *freesync_mode = NULL;
5635         bool native_mode_found = false;
5636         bool recalculate_timing = false;
5637         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5638         int mode_refresh;
5639         int preferred_refresh = 0;
5640 #if defined(CONFIG_DRM_AMD_DC_DCN)
5641         struct dsc_dec_dpcd_caps dsc_caps;
5642         uint32_t link_bandwidth_kbps;
5643 #endif
5644         struct dc_sink *sink = NULL;
5645
5646         memset(&saved_mode, 0, sizeof(saved_mode));
5647
5648         if (aconnector == NULL) {
5649                 DRM_ERROR("aconnector is NULL!\n");
5650                 return stream;
5651         }
5652
5653         drm_connector = &aconnector->base;
5654
5655         if (!aconnector->dc_sink) {
5656                 sink = create_fake_sink(aconnector);
5657                 if (!sink)
5658                         return stream;
5659         } else {
5660                 sink = aconnector->dc_sink;
5661                 dc_sink_retain(sink);
5662         }
5663
5664         stream = dc_create_stream_for_sink(sink);
5665
5666         if (stream == NULL) {
5667                 DRM_ERROR("Failed to create stream for sink!\n");
5668                 goto finish;
5669         }
5670
5671         stream->dm_stream_context = aconnector;
5672
5673         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5674                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5675
5676         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5677                 /* Search for preferred mode */
5678                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5679                         native_mode_found = true;
5680                         break;
5681                 }
5682         }
5683         if (!native_mode_found)
5684                 preferred_mode = list_first_entry_or_null(
5685                                 &aconnector->base.modes,
5686                                 struct drm_display_mode,
5687                                 head);
5688
5689         mode_refresh = drm_mode_vrefresh(&mode);
5690
5691         if (preferred_mode == NULL) {
5692                 /*
5693                  * This may not be an error. The use case is when we have no
5694                  * usermode calls to reset and set mode upon hotplug. In this
5695                  * case, we call set mode ourselves to restore the previous mode
5696                  * and the mode list may not be filled in yet.
5697                  */
5698                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5699         } else {
5700                 recalculate_timing = amdgpu_freesync_vid_mode &&
5701                                  is_freesync_video_mode(&mode, aconnector);
5702                 if (recalculate_timing) {
5703                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5704                         saved_mode = mode;
5705                         mode = *freesync_mode;
5706                 } else {
5707                         decide_crtc_timing_for_drm_display_mode(
5708                                 &mode, preferred_mode, scale);
5709
5710                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
5711                 }
5712         }
5713
5714         if (recalculate_timing)
5715                 drm_mode_set_crtcinfo(&saved_mode, 0);
5716         else if (!dm_state)
5717                 drm_mode_set_crtcinfo(&mode, 0);
5718
5719         /*
5720          * If scaling is enabled and the refresh rate didn't change,
5721          * we copy the VIC and polarities of the old timings.
5722          */
5723         if (!scale || mode_refresh != preferred_refresh)
5724                 fill_stream_properties_from_drm_display_mode(
5725                         stream, &mode, &aconnector->base, con_state, NULL,
5726                         requested_bpc);
5727         else
5728                 fill_stream_properties_from_drm_display_mode(
5729                         stream, &mode, &aconnector->base, con_state, old_stream,
5730                         requested_bpc);
5731
5732         stream->timing.flags.DSC = 0;
5733
5734         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5735 #if defined(CONFIG_DRM_AMD_DC_DCN)
5736                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5737                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5738                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5739                                       &dsc_caps);
5740                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5741                                                              dc_link_get_link_cap(aconnector->dc_link));
5742
5743                 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5744                         /* Set DSC policy according to dsc_clock_en */
5745                         dc_dsc_policy_set_enable_dsc_when_not_needed(
5746                                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5747
5748                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5749                                                   &dsc_caps,
5750                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5751                                                   0,
5752                                                   link_bandwidth_kbps,
5753                                                   &stream->timing,
5754                                                   &stream->timing.dsc_cfg))
5755                                 stream->timing.flags.DSC = 1;
5756                         /* Overwrite the stream flag if DSC is enabled through debugfs */
5757                         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5758                                 stream->timing.flags.DSC = 1;
5759
5760                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5761                                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5762
5763                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5764                                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5765
5766                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5767                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5768                 }
5769 #endif
5770         }
5771
5772         update_stream_scaling_settings(&mode, dm_state, stream);
5773
5774         fill_audio_info(
5775                 &stream->audio_info,
5776                 drm_connector,
5777                 sink);
5778
5779         update_stream_signal(stream, sink);
5780
5781         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5782                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5783
5784         if (stream->link->psr_settings.psr_feature_enabled) {
5785                 /*
5786                  * Decide whether the stream supports VSC SDP colorimetry
5787                  * before building the VSC info packet.
5788                  */
5789                 stream->use_vsc_sdp_for_colorimetry = false;
5790                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5791                         stream->use_vsc_sdp_for_colorimetry =
5792                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5793                 } else {
5794                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5795                                 stream->use_vsc_sdp_for_colorimetry = true;
5796                 }
5797                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5798         }
5799 finish:
5800         dc_sink_release(sink);
5801
5802         return stream;
5803 }
5804
5805 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5806 {
5807         drm_crtc_cleanup(crtc);
5808         kfree(crtc);
5809 }
5810
5811 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5812                                   struct drm_crtc_state *state)
5813 {
5814         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5815
5816         /* TODO: Destroy dc_stream objects once the stream object is flattened */
5817         if (cur->stream)
5818                 dc_stream_release(cur->stream);
5819
5821         __drm_atomic_helper_crtc_destroy_state(state);
5822
5824         kfree(state);
5825 }
5826
5827 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5828 {
5829         struct dm_crtc_state *state;
5830
5831         if (crtc->state)
5832                 dm_crtc_destroy_state(crtc, crtc->state);
5833
5834         state = kzalloc(sizeof(*state), GFP_KERNEL);
5835         if (WARN_ON(!state))
5836                 return;
5837
5838         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5839 }
5840
5841 static struct drm_crtc_state *
5842 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5843 {
5844         struct dm_crtc_state *state, *cur;
5845
5846         if (WARN_ON(!crtc->state))
5847                 return NULL;
5848
5849         cur = to_dm_crtc_state(crtc->state);
5850
5851         state = kzalloc(sizeof(*state), GFP_KERNEL);
5852         if (!state)
5853                 return NULL;
5854
5855         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5856
5857         if (cur->stream) {
5858                 state->stream = cur->stream;
5859                 dc_stream_retain(state->stream);
5860         }
5861
5862         state->active_planes = cur->active_planes;
5863         state->vrr_infopacket = cur->vrr_infopacket;
5864         state->abm_level = cur->abm_level;
5865         state->vrr_supported = cur->vrr_supported;
5866         state->freesync_config = cur->freesync_config;
5867         state->cm_has_degamma = cur->cm_has_degamma;
5868         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5869         /* TODO: Duplicate dc_stream once the stream object is flattened */
5870
5871         return &state->base;
5872 }
5873
5874 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5875 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5876 {
5877         crtc_debugfs_init(crtc);
5878
5879         return 0;
5880 }
5881 #endif
5882
5883 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5884 {
5885         enum dc_irq_source irq_source;
5886         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5887         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5888         int rc;
5889
5890         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5891
5892         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5893
5894         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5895                       acrtc->crtc_id, enable ? "en" : "dis", rc);
5896         return rc;
5897 }
5898
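/*
 * Enable/disable the VBLANK interrupt for a CRTC. The VUPDATE interrupt
 * is only needed while VRR is active and is paired off whenever vblank is
 * disabled; on DCN the vblank workqueue's mall_work is scheduled with the
 * new enable state (skipped during GPU reset).
 */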
5899 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5900 {
5901         enum dc_irq_source irq_source;
5902         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5903         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5904         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5905 #if defined(CONFIG_DRM_AMD_DC_DCN)
5906         struct amdgpu_display_manager *dm = &adev->dm;
5907         unsigned long flags;
5908 #endif
5909         int rc = 0;
5910
5911         if (enable) {
5912                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5913                 if (amdgpu_dm_vrr_active(acrtc_state))
5914                         rc = dm_set_vupdate_irq(crtc, true);
5915         } else {
5916                 /* vblank irq off -> vupdate irq off */
5917                 rc = dm_set_vupdate_irq(crtc, false);
5918         }
5919
5920         if (rc)
5921                 return rc;
5922
5923         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5924
5925         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5926                 return -EBUSY;
5927
5928         if (amdgpu_in_reset(adev))
5929                 return 0;
5930
5931 #if defined(CONFIG_DRM_AMD_DC_DCN)
5932         spin_lock_irqsave(&dm->vblank_lock, flags);
5933         dm->vblank_workqueue->dm = dm;
5934         dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5935         dm->vblank_workqueue->enable = enable;
5936         spin_unlock_irqrestore(&dm->vblank_lock, flags);
5937         schedule_work(&dm->vblank_workqueue->mall_work);
5938 #endif
5939
5940         return 0;
5941 }
5942
5943 static int dm_enable_vblank(struct drm_crtc *crtc)
5944 {
5945         return dm_set_vblank(crtc, true);
5946 }
5947
5948 static void dm_disable_vblank(struct drm_crtc *crtc)
5949 {
5950         dm_set_vblank(crtc, false);
5951 }
5952
5953 /* Only the options currently available for the driver are implemented */
5954 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5955         .reset = dm_crtc_reset_state,
5956         .destroy = amdgpu_dm_crtc_destroy,
5957         .set_config = drm_atomic_helper_set_config,
5958         .page_flip = drm_atomic_helper_page_flip,
5959         .atomic_duplicate_state = dm_crtc_duplicate_state,
5960         .atomic_destroy_state = dm_crtc_destroy_state,
5961         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5962         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5963         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5964         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5965         .enable_vblank = dm_enable_vblank,
5966         .disable_vblank = dm_disable_vblank,
5967         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5968 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5969         .late_register = amdgpu_dm_crtc_late_register,
5970 #endif
5971 };
5972
5973 static enum drm_connector_status
5974 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5975 {
5976         bool connected;
5977         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5978
5979         /*
5980          * Notes:
5981          * 1. This interface is NOT called in context of HPD irq.
5982          * 2. This interface *is called* in the context of a user-mode ioctl,
5983          * which makes it a bad place for *any* MST-related activity.
5984          */
5985
5986         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5987             !aconnector->fake_enable)
5988                 connected = (aconnector->dc_sink != NULL);
5989         else
5990                 connected = (aconnector->base.force == DRM_FORCE_ON);
5991
5992         update_subconnector_property(aconnector);
5993
5994         return (connected ? connector_status_connected :
5995                         connector_status_disconnected);
5996 }
5997
5998 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5999                                             struct drm_connector_state *connector_state,
6000                                             struct drm_property *property,
6001                                             uint64_t val)
6002 {
6003         struct drm_device *dev = connector->dev;
6004         struct amdgpu_device *adev = drm_to_adev(dev);
6005         struct dm_connector_state *dm_old_state =
6006                 to_dm_connector_state(connector->state);
6007         struct dm_connector_state *dm_new_state =
6008                 to_dm_connector_state(connector_state);
6009
6010         int ret = -EINVAL;
6011
6012         if (property == dev->mode_config.scaling_mode_property) {
6013                 enum amdgpu_rmx_type rmx_type;
6014
6015                 switch (val) {
6016                 case DRM_MODE_SCALE_CENTER:
6017                         rmx_type = RMX_CENTER;
6018                         break;
6019                 case DRM_MODE_SCALE_ASPECT:
6020                         rmx_type = RMX_ASPECT;
6021                         break;
6022                 case DRM_MODE_SCALE_FULLSCREEN:
6023                         rmx_type = RMX_FULL;
6024                         break;
6025                 case DRM_MODE_SCALE_NONE:
6026                 default:
6027                         rmx_type = RMX_OFF;
6028                         break;
6029                 }
6030
6031                 if (dm_old_state->scaling == rmx_type)
6032                         return 0;
6033
6034                 dm_new_state->scaling = rmx_type;
6035                 ret = 0;
6036         } else if (property == adev->mode_info.underscan_hborder_property) {
6037                 dm_new_state->underscan_hborder = val;
6038                 ret = 0;
6039         } else if (property == adev->mode_info.underscan_vborder_property) {
6040                 dm_new_state->underscan_vborder = val;
6041                 ret = 0;
6042         } else if (property == adev->mode_info.underscan_property) {
6043                 dm_new_state->underscan_enable = val;
6044                 ret = 0;
6045         } else if (property == adev->mode_info.abm_level_property) {
6046                 dm_new_state->abm_level = val;
6047                 ret = 0;
6048         }
6049
6050         return ret;
6051 }
6052
6053 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6054                                             const struct drm_connector_state *state,
6055                                             struct drm_property *property,
6056                                             uint64_t *val)
6057 {
6058         struct drm_device *dev = connector->dev;
6059         struct amdgpu_device *adev = drm_to_adev(dev);
6060         struct dm_connector_state *dm_state =
6061                 to_dm_connector_state(state);
6062         int ret = -EINVAL;
6063
6064         if (property == dev->mode_config.scaling_mode_property) {
6065                 switch (dm_state->scaling) {
6066                 case RMX_CENTER:
6067                         *val = DRM_MODE_SCALE_CENTER;
6068                         break;
6069                 case RMX_ASPECT:
6070                         *val = DRM_MODE_SCALE_ASPECT;
6071                         break;
6072                 case RMX_FULL:
6073                         *val = DRM_MODE_SCALE_FULLSCREEN;
6074                         break;
6075                 case RMX_OFF:
6076                 default:
6077                         *val = DRM_MODE_SCALE_NONE;
6078                         break;
6079                 }
6080                 ret = 0;
6081         } else if (property == adev->mode_info.underscan_hborder_property) {
6082                 *val = dm_state->underscan_hborder;
6083                 ret = 0;
6084         } else if (property == adev->mode_info.underscan_vborder_property) {
6085                 *val = dm_state->underscan_vborder;
6086                 ret = 0;
6087         } else if (property == adev->mode_info.underscan_property) {
6088                 *val = dm_state->underscan_enable;
6089                 ret = 0;
6090         } else if (property == adev->mode_info.abm_level_property) {
6091                 *val = dm_state->abm_level;
6092                 ret = 0;
6093         }
6094
6095         return ret;
6096 }
6097
6098 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6099 {
6100         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6101
6102         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6103 }
6104
6105 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6106 {
6107         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6108         const struct dc_link *link = aconnector->dc_link;
6109         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6110         struct amdgpu_display_manager *dm = &adev->dm;
6111
6112         /*
6113          * Call only if mst_mgr was initialized before, since it's not done
6114          * for all connector types.
6115          */
6116         if (aconnector->mst_mgr.dev)
6117                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6118
6119 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6120         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6121
6122         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6123             link->type != dc_connection_none &&
6124             dm->backlight_dev) {
6125                 backlight_device_unregister(dm->backlight_dev);
6126                 dm->backlight_dev = NULL;
6127         }
6128 #endif
6129
6130         if (aconnector->dc_em_sink)
6131                 dc_sink_release(aconnector->dc_em_sink);
6132         aconnector->dc_em_sink = NULL;
6133         if (aconnector->dc_sink)
6134                 dc_sink_release(aconnector->dc_sink);
6135         aconnector->dc_sink = NULL;
6136
6137         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6138         drm_connector_unregister(connector);
6139         drm_connector_cleanup(connector);
6140         if (aconnector->i2c) {
6141                 i2c_del_adapter(&aconnector->i2c->base);
6142                 kfree(aconnector->i2c);
6143         }
6144         kfree(aconnector->dm_dp_aux.aux.name);
6145
6146         kfree(connector);
6147 }
6148
6149 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6150 {
6151         struct dm_connector_state *state =
6152                 to_dm_connector_state(connector->state);
6153
6154         if (connector->state)
6155                 __drm_atomic_helper_connector_destroy_state(connector->state);
6156
6157         kfree(state);
6158
6159         state = kzalloc(sizeof(*state), GFP_KERNEL);
6160
6161         if (state) {
6162                 state->scaling = RMX_OFF;
6163                 state->underscan_enable = false;
6164                 state->underscan_hborder = 0;
6165                 state->underscan_vborder = 0;
6166                 state->base.max_requested_bpc = 8;
6167                 state->vcpi_slots = 0;
6168                 state->pbn = 0;
6169                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6170                         state->abm_level = amdgpu_dm_abm_level;
6171
6172                 __drm_atomic_helper_connector_reset(connector, &state->base);
6173         }
6174 }
6175
6176 struct drm_connector_state *
6177 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6178 {
6179         struct dm_connector_state *state =
6180                 to_dm_connector_state(connector->state);
6181
6182         struct dm_connector_state *new_state =
6183                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6184
6185         if (!new_state)
6186                 return NULL;
6187
6188         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6189
6190         new_state->freesync_capable = state->freesync_capable;
6191         new_state->abm_level = state->abm_level;
6192         new_state->scaling = state->scaling;
6193         new_state->underscan_enable = state->underscan_enable;
6194         new_state->underscan_hborder = state->underscan_hborder;
6195         new_state->underscan_vborder = state->underscan_vborder;
6196         new_state->vcpi_slots = state->vcpi_slots;
6197         new_state->pbn = state->pbn;
6198         return &new_state->base;
6199 }
6200
6201 static int
6202 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6203 {
6204         struct amdgpu_dm_connector *amdgpu_dm_connector =
6205                 to_amdgpu_dm_connector(connector);
6206         int r;
6207
6208         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6209             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6210                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6211                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6212                 if (r)
6213                         return r;
6214         }
6215
6216 #if defined(CONFIG_DEBUG_FS)
6217         connector_debugfs_init(amdgpu_dm_connector);
6218 #endif
6219
6220         return 0;
6221 }
6222
6223 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6224         .reset = amdgpu_dm_connector_funcs_reset,
6225         .detect = amdgpu_dm_connector_detect,
6226         .fill_modes = drm_helper_probe_single_connector_modes,
6227         .destroy = amdgpu_dm_connector_destroy,
6228         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6229         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6230         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6231         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6232         .late_register = amdgpu_dm_connector_late_register,
6233         .early_unregister = amdgpu_dm_connector_unregister
6234 };
6235
6236 static int get_modes(struct drm_connector *connector)
6237 {
6238         return amdgpu_dm_connector_get_modes(connector);
6239 }
6240
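/*
 * Build an emulated (remote) sink from the connector's EDID override blob
 * so that forced connectors still expose modes; without an EDID blob the
 * connector is forced OFF instead.
 */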
6241 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6242 {
6243         struct dc_sink_init_data init_params = {
6244                         .link = aconnector->dc_link,
6245                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6246         };
6247         struct edid *edid;
6248
6249         if (!aconnector->base.edid_blob_ptr) {
6250                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6251                                 aconnector->base.name);
6252
6253                 aconnector->base.force = DRM_FORCE_OFF;
6254                 aconnector->base.override_edid = false;
6255                 return;
6256         }
6257
6258         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6259
6260         aconnector->edid = edid;
6261
6262         aconnector->dc_em_sink = dc_link_add_remote_sink(
6263                 aconnector->dc_link,
6264                 (uint8_t *)edid,
6265                 (edid->extensions + 1) * EDID_LENGTH,
6266                 &init_params);
6267
6268         if (aconnector->base.force == DRM_FORCE_ON) {
6269                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6270                 aconnector->dc_link->local_sink :
6271                 aconnector->dc_em_sink;
6272                 dc_sink_retain(aconnector->dc_sink);
6273         }
6274 }
6275
6276 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6277 {
6278         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6279
6280         /*
6281          * In case of a headless boot with force on for a DP-managed connector,
6282          * these settings have to be != 0 to get an initial modeset.
6283          */
6284         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6285                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6286                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6287         }
6288
6290         aconnector->base.override_edid = true;
6291         create_eml_sink(aconnector);
6292 }
6293
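/*
 * Create a stream and run it through DC validation, retrying with the bpc
 * lowered in steps of 2 (down to a floor of 6) when validation fails. If
 * the failure is DC_FAIL_ENC_VALIDATE, retry once more with YCbCr420
 * output forced.
 */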
6294 static struct dc_stream_state *
6295 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6296                                 const struct drm_display_mode *drm_mode,
6297                                 const struct dm_connector_state *dm_state,
6298                                 const struct dc_stream_state *old_stream)
6299 {
6300         struct drm_connector *connector = &aconnector->base;
6301         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6302         struct dc_stream_state *stream;
6303         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6304         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6305         enum dc_status dc_result = DC_OK;
6306
6307         do {
6308                 stream = create_stream_for_sink(aconnector, drm_mode,
6309                                                 dm_state, old_stream,
6310                                                 requested_bpc);
6311                 if (stream == NULL) {
6312                         DRM_ERROR("Failed to create stream for sink!\n");
6313                         break;
6314                 }
6315
6316                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6317
6318                 if (dc_result != DC_OK) {
6319                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6320                                       drm_mode->hdisplay,
6321                                       drm_mode->vdisplay,
6322                                       drm_mode->clock,
6323                                       dc_result,
6324                                       dc_status_to_str(dc_result));
6325
6326                         dc_stream_release(stream);
6327                         stream = NULL;
6328                         requested_bpc -= 2; /* lower bpc to retry validation */
6329                 }
6330
6331         } while (stream == NULL && requested_bpc >= 6);
6332
6333         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6334                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6335
6336                 aconnector->force_yuv420_output = true;
6337                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6338                                                 dm_state, old_stream);
6339                 aconnector->force_yuv420_output = false;
6340         }
6341
6342         return stream;
6343 }
6344
6345 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6346                                    struct drm_display_mode *mode)
6347 {
6348         int result = MODE_ERROR;
6349         struct dc_sink *dc_sink;
6350         /* TODO: Unhardcode stream count */
6351         struct dc_stream_state *stream;
6352         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6353
6354         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6355                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6356                 return result;
6357
6358         /*
6359          * Only run this the first time mode_valid is called, to initialize
6360          * EDID management.
6361          */
6362         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6363                 !aconnector->dc_em_sink)
6364                 handle_edid_mgmt(aconnector);
6365
6366         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6367
6368         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6369                                 aconnector->base.force != DRM_FORCE_ON) {
6370                 DRM_ERROR("dc_sink is NULL!\n");
6371                 goto fail;
6372         }
6373
6374         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6375         if (stream) {
6376                 dc_stream_release(stream);
6377                 result = MODE_OK;
6378         }
6379
6380 fail:
6381         /* TODO: error handling */
6382         return result;
6383 }
6384
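/*
 * Pack the connector state's HDR metadata into an HDMI DRM (Dynamic Range
 * and Mastering) infoframe and translate it into a DC info packet. The
 * payload is a fixed 26 bytes plus a 4 byte header; only the header and
 * leading payload bytes differ between HDMI and the DP/eDP SDP
 * encapsulation.
 */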
6385 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6386                                 struct dc_info_packet *out)
6387 {
6388         struct hdmi_drm_infoframe frame;
6389         unsigned char buf[30]; /* 26 + 4 */
6390         ssize_t len;
6391         int ret, i;
6392
6393         memset(out, 0, sizeof(*out));
6394
6395         if (!state->hdr_output_metadata)
6396                 return 0;
6397
6398         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6399         if (ret)
6400                 return ret;
6401
6402         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6403         if (len < 0)
6404                 return (int)len;
6405
6406         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6407         if (len != 30)
6408                 return -EINVAL;
6409
6410         /* Prepare the infopacket for DC. */
6411         switch (state->connector->connector_type) {
6412         case DRM_MODE_CONNECTOR_HDMIA:
6413                 out->hb0 = 0x87; /* type */
6414                 out->hb1 = 0x01; /* version */
6415                 out->hb2 = 0x1A; /* length */
6416                 out->sb[0] = buf[3]; /* checksum */
6417                 i = 1;
6418                 break;
6419
6420         case DRM_MODE_CONNECTOR_DisplayPort:
6421         case DRM_MODE_CONNECTOR_eDP:
6422                 out->hb0 = 0x00; /* sdp id, zero */
6423                 out->hb1 = 0x87; /* type */
6424                 out->hb2 = 0x1D; /* payload len - 1 */
6425                 out->hb3 = (0x13 << 2); /* sdp version */
6426                 out->sb[0] = 0x01; /* version */
6427                 out->sb[1] = 0x1A; /* length */
6428                 i = 2;
6429                 break;
6430
6431         default:
6432                 return -EINVAL;
6433         }
6434
6435         memcpy(&out->sb[i], &buf[4], 26);
6436         out->valid = true;
6437
6438         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6439                        sizeof(out->sb), false);
6440
6441         return 0;
6442 }
6443
6444 static int
6445 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6446                                  struct drm_atomic_state *state)
6447 {
6448         struct drm_connector_state *new_con_state =
6449                 drm_atomic_get_new_connector_state(state, conn);
6450         struct drm_connector_state *old_con_state =
6451                 drm_atomic_get_old_connector_state(state, conn);
6452         struct drm_crtc *crtc = new_con_state->crtc;
6453         struct drm_crtc_state *new_crtc_state;
6454         int ret;
6455
6456         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6457
6458         if (!crtc)
6459                 return 0;
6460
6461         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6462                 struct dc_info_packet hdr_infopacket;
6463
6464                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6465                 if (ret)
6466                         return ret;
6467
6468                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6469                 if (IS_ERR(new_crtc_state))
6470                         return PTR_ERR(new_crtc_state);
6471
6472                 /*
6473                  * DC considers the stream backends changed if the
6474                  * static metadata changes. Forcing the modeset also
6475                  * gives a simple way for userspace to switch from
6476                  * 8bpc to 10bpc when setting the metadata to enter
6477                  * or exit HDR.
6478                  *
6479                  * Changing the static metadata after it's been
6480                  * set is permissible, however. So only force a
6481                  * modeset if we're entering or exiting HDR.
6482                  */
6483                 new_crtc_state->mode_changed =
6484                         !old_con_state->hdr_output_metadata ||
6485                         !new_con_state->hdr_output_metadata;
6486         }
6487
6488         return 0;
6489 }
6490
6491 static const struct drm_connector_helper_funcs
6492 amdgpu_dm_connector_helper_funcs = {
6493         /*
6494          * If hotplugging a second, bigger display in FB console mode, the bigger
6495          * resolution modes will be filtered by drm_mode_validate_size() and will
6496          * be missing after the user starts lightdm. So we need to renew the mode
6497          * list in the get_modes callback, not just return the mode count.
6498          */
6499         .get_modes = get_modes,
6500         .mode_valid = amdgpu_dm_connector_mode_valid,
6501         .atomic_check = amdgpu_dm_connector_atomic_check,
6502 };
6503
6504 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6505 {
6506 }
6507
6508 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6509 {
6510         struct drm_atomic_state *state = new_crtc_state->state;
6511         struct drm_plane *plane;
6512         int num_active = 0;
6513
6514         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6515                 struct drm_plane_state *new_plane_state;
6516
6517                 /* Cursor planes are "fake". */
6518                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6519                         continue;
6520
6521                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6522
6523                 if (!new_plane_state) {
6524                         /*
6525                          * The plane is enabled on the CRTC and hasn't changed
6526                          * state. This means that it previously passed
6527                          * validation and is therefore enabled.
6528                          */
6529                         num_active += 1;
6530                         continue;
6531                 }
6532
6533                 /* We need a framebuffer to be considered enabled. */
6534                 num_active += (new_plane_state->fb != NULL);
6535         }
6536
6537         return num_active;
6538 }
6539
6540 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6541                                          struct drm_crtc_state *new_crtc_state)
6542 {
6543         struct dm_crtc_state *dm_new_crtc_state =
6544                 to_dm_crtc_state(new_crtc_state);
6545
6546         dm_new_crtc_state->active_planes = 0;
6547
6548         if (!dm_new_crtc_state->stream)
6549                 return;
6550
6551         dm_new_crtc_state->active_planes =
6552                 count_crtc_active_planes(new_crtc_state);
6553 }
6554
6555 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6556                                        struct drm_atomic_state *state)
6557 {
6558         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6559                                                                           crtc);
6560         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6561         struct dc *dc = adev->dm.dc;
6562         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6563         int ret = -EINVAL;
6564
6565         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6566
6567         dm_update_crtc_active_planes(crtc, crtc_state);
6568
6569         if (unlikely(!dm_crtc_state->stream &&
6570                      modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6571                 WARN_ON(1);
6572                 return ret;
6573         }
6574
6575         /*
6576          * We require the primary plane to be enabled whenever the CRTC is, otherwise
6577          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6578          * planes are disabled, which is not supported by the hardware. And there is legacy
6579          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6580          */
6581         if (crtc_state->enable &&
6582             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6583                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6584                 return -EINVAL;
6585         }
6586
6587         /* In some use cases, like reset, no stream is attached */
6588         if (!dm_crtc_state->stream)
6589                 return 0;
6590
6591         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6592                 return 0;
6593
6594         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6595         return ret;
6596 }
6597
6598 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6599                                       const struct drm_display_mode *mode,
6600                                       struct drm_display_mode *adjusted_mode)
6601 {
6602         return true;
6603 }
6604
6605 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6606         .disable = dm_crtc_helper_disable,
6607         .atomic_check = dm_crtc_helper_atomic_check,
6608         .mode_fixup = dm_crtc_helper_mode_fixup,
6609         .get_scanout_position = amdgpu_crtc_get_scanout_position,
6610 };
6611
6612 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6613 {
6615 }
6616
6617 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6618 {
6619         switch (display_color_depth) {
6620         case COLOR_DEPTH_666:
6621                 return 6;
6622         case COLOR_DEPTH_888:
6623                 return 8;
6624         case COLOR_DEPTH_101010:
6625                 return 10;
6626         case COLOR_DEPTH_121212:
6627                 return 12;
6628         case COLOR_DEPTH_141414:
6629                 return 14;
6630         case COLOR_DEPTH_161616:
6631                 return 16;
6632         default:
6633                 break;
6634         }
6635         return 0;
6636 }
6637
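/*
 * For MST connectors, convert the adjusted mode's bandwidth (pixel clock
 * times uncompressed bpp, from the negotiated color depth) into PBN and
 * ask the MST manager for VCPI slots; a negative slot count is propagated
 * as the atomic-check error.
 */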
6638 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6639                                           struct drm_crtc_state *crtc_state,
6640                                           struct drm_connector_state *conn_state)
6641 {
6642         struct drm_atomic_state *state = crtc_state->state;
6643         struct drm_connector *connector = conn_state->connector;
6644         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6645         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6646         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6647         struct drm_dp_mst_topology_mgr *mst_mgr;
6648         struct drm_dp_mst_port *mst_port;
6649         enum dc_color_depth color_depth;
6650         int clock, bpp = 0;
6651         bool is_y420 = false;
6652
6653         if (!aconnector->port || !aconnector->dc_sink)
6654                 return 0;
6655
6656         mst_port = aconnector->port;
6657         mst_mgr = &aconnector->mst_port->mst_mgr;
6658
6659         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6660                 return 0;
6661
6662         if (!state->duplicated) {
6663                 int max_bpc = conn_state->max_requested_bpc;
6664                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6665                                 aconnector->force_yuv420_output;
6666                 color_depth = convert_color_depth_from_display_info(connector,
6667                                                                     is_y420,
6668                                                                     max_bpc);
6669                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6670                 clock = adjusted_mode->clock;
6671                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6672         }
6673         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6674                                                                            mst_mgr,
6675                                                                            mst_port,
6676                                                                            dm_new_connector_state->pbn,
6677                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
6678         if (dm_new_connector_state->vcpi_slots < 0) {
6679                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6680                 return dm_new_connector_state->vcpi_slots;
6681         }
6682         return 0;
6683 }
6684
6685 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6686         .disable = dm_encoder_helper_disable,
6687         .atomic_check = dm_encoder_helper_atomic_check
6688 };
6689
6690 #if defined(CONFIG_DRM_AMD_DC_DCN)
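/*
 * Recompute PBN/VCPI for MST streams once DSC decisions are known: DSC
 * streams use the compressed dsc_cfg.bits_per_pixel (a fractional 1/16-bpp
 * value, which drm_dp_calc_pbn_mode() handles via its dsc flag) instead of
 * the uncompressed bpp used in the encoder's atomic check.
 */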
6691 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6692                                             struct dc_state *dc_state)
6693 {
6694         struct dc_stream_state *stream = NULL;
6695         struct drm_connector *connector;
6696         struct drm_connector_state *new_con_state;
6697         struct amdgpu_dm_connector *aconnector;
6698         struct dm_connector_state *dm_conn_state;
6699         int i, j, clock, bpp;
6700         int vcpi, pbn_div, pbn = 0;
6701
6702         for_each_new_connector_in_state(state, connector, new_con_state, i) {
6703
6704                 aconnector = to_amdgpu_dm_connector(connector);
6705
6706                 if (!aconnector->port)
6707                         continue;
6708
6709                 if (!new_con_state || !new_con_state->crtc)
6710                         continue;
6711
6712                 dm_conn_state = to_dm_connector_state(new_con_state);
6713
6714                 for (j = 0; j < dc_state->stream_count; j++) {
6715                         stream = dc_state->streams[j];
6716                         if (!stream)
6717                                 continue;
6718
6719                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6720                                 break;
6721
6722                         stream = NULL;
6723                 }
6724
6725                 if (!stream)
6726                         continue;
6727
6728                 if (stream->timing.flags.DSC != 1) {
6729                         drm_dp_mst_atomic_enable_dsc(state,
6730                                                      aconnector->port,
6731                                                      dm_conn_state->pbn,
6732                                                      0,
6733                                                      false);
6734                         continue;
6735                 }
6736
6737                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6738                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6739                 clock = stream->timing.pix_clk_100hz / 10;
6740                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6741                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6742                                                     aconnector->port,
6743                                                     pbn, pbn_div,
6744                                                     true);
6745                 if (vcpi < 0)
6746                         return vcpi;
6747
6748                 dm_conn_state->pbn = pbn;
6749                 dm_conn_state->vcpi_slots = vcpi;
6750         }
6751         return 0;
6752 }
6753 #endif
6754
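/*
 * drm_plane_funcs.reset hook: free any existing plane state and install a
 * freshly zeroed dm_plane_state as the plane's baseline atomic state.
 */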
6755 static void dm_drm_plane_reset(struct drm_plane *plane)
6756 {
6757         struct dm_plane_state *amdgpu_state = NULL;
6758
6759         if (plane->state)
6760                 plane->funcs->atomic_destroy_state(plane, plane->state);
6761
6762         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6763         WARN_ON(amdgpu_state == NULL);
6764
6765         if (amdgpu_state)
6766                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6767 }
6768
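/*
 * Duplicate the plane state for an atomic transaction, taking an extra
 * reference on the backing dc_plane_state so both copies can safely hold it.
 */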
6769 static struct drm_plane_state *
6770 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6771 {
6772         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6773
6774         old_dm_plane_state = to_dm_plane_state(plane->state);
6775         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6776         if (!dm_plane_state)
6777                 return NULL;
6778
6779         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6780
6781         if (old_dm_plane_state->dc_state) {
6782                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6783                 dc_plane_state_retain(dm_plane_state->dc_state);
6784         }
6785
6786         return &dm_plane_state->base;
6787 }
6788
6789 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6790                                 struct drm_plane_state *state)
6791 {
6792         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6793
6794         if (dm_plane_state->dc_state)
6795                 dc_plane_state_release(dm_plane_state->dc_state);
6796
6797         drm_atomic_helper_plane_destroy_state(plane, state);
6798 }
6799
6800 static const struct drm_plane_funcs dm_plane_funcs = {
6801         .update_plane   = drm_atomic_helper_update_plane,
6802         .disable_plane  = drm_atomic_helper_disable_plane,
6803         .destroy        = drm_primary_helper_destroy,
6804         .reset = dm_drm_plane_reset,
6805         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6806         .atomic_destroy_state = dm_drm_plane_destroy_state,
6807         .format_mod_supported = dm_plane_format_mod_supported,
6808 };
6809
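/*
 * Pin the framebuffer's BO so its GPU address stays valid while scanned out:
 * reserve it through TTM, pin it into a supported domain (cursors must live
 * in VRAM), bind it into GART, and record the resulting GPU offset in the
 * amdgpu_framebuffer for later programming.
 */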
6810 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6811                                       struct drm_plane_state *new_state)
6812 {
6813         struct amdgpu_framebuffer *afb;
6814         struct drm_gem_object *obj;
6815         struct amdgpu_device *adev;
6816         struct amdgpu_bo *rbo;
6817         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6818         struct list_head list;
6819         struct ttm_validate_buffer tv;
6820         struct ww_acquire_ctx ticket;
6821         uint32_t domain;
6822         int r;
6823
6824         if (!new_state->fb) {
6825                 DRM_DEBUG_KMS("No FB bound\n");
6826                 return 0;
6827         }
6828
6829         afb = to_amdgpu_framebuffer(new_state->fb);
6830         obj = new_state->fb->obj[0];
6831         rbo = gem_to_amdgpu_bo(obj);
6832         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6833         INIT_LIST_HEAD(&list);
6834
6835         tv.bo = &rbo->tbo;
6836         tv.num_shared = 1;
6837         list_add(&tv.head, &list);
6838
6839         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6840         if (r) {
6841                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6842                 return r;
6843         }
6844
6845         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6846                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6847         else
6848                 domain = AMDGPU_GEM_DOMAIN_VRAM;
6849
6850         r = amdgpu_bo_pin(rbo, domain);
6851         if (unlikely(r != 0)) {
6852                 if (r != -ERESTARTSYS)
6853                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6854                 ttm_eu_backoff_reservation(&ticket, &list);
6855                 return r;
6856         }
6857
6858         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6859         if (unlikely(r != 0)) {
6860                 amdgpu_bo_unpin(rbo);
6861                 ttm_eu_backoff_reservation(&ticket, &list);
6862                 DRM_ERROR("%p bind failed\n", rbo);
6863                 return r;
6864         }
6865
6866         ttm_eu_backoff_reservation(&ticket, &list);
6867
6868         afb->address = amdgpu_bo_gpu_offset(rbo);
6869
6870         amdgpu_bo_ref(rbo);
6871
6872         /*
6873          * We don't do surface updates on planes that have been newly created,
6874          * but we also don't have the afb->address during atomic check.
6875          *
6876          * Fill in buffer attributes depending on the address here, but only on
6877          * newly created planes since they're not being used by DC yet and this
6878          * won't modify global state.
6879          */
6880         dm_plane_state_old = to_dm_plane_state(plane->state);
6881         dm_plane_state_new = to_dm_plane_state(new_state);
6882
6883         if (dm_plane_state_new->dc_state &&
6884             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6885                 struct dc_plane_state *plane_state =
6886                         dm_plane_state_new->dc_state;
6887                 bool force_disable_dcc = !plane_state->dcc.enable;
6888
6889                 fill_plane_buffer_attributes(
6890                         adev, afb, plane_state->format, plane_state->rotation,
6891                         afb->tiling_flags,
6892                         &plane_state->tiling_info, &plane_state->plane_size,
6893                         &plane_state->dcc, &plane_state->address,
6894                         afb->tmz_surface, force_disable_dcc);
6895         }
6896
6897         return 0;
6898 }
6899
6900 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6901                                        struct drm_plane_state *old_state)
6902 {
6903         struct amdgpu_bo *rbo;
6904         int r;
6905
6906         if (!old_state->fb)
6907                 return;
6908
6909         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6910         r = amdgpu_bo_reserve(rbo, false);
6911         if (unlikely(r)) {
6912                 DRM_ERROR("failed to reserve rbo before unpin\n");
6913                 return;
6914         }
6915
6916         amdgpu_bo_unpin(rbo);
6917         amdgpu_bo_unreserve(rbo);
6918         amdgpu_bo_unref(&rbo);
6919 }
6920
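/*
 * Validate plane position and scaling against the CRTC mode and DC plane
 * caps. The caps use a fixed-point scale where 1000 == 1.0, while DRM expects
 * 16.16 fixed point with src/dst orientation. As an illustrative example
 * (values assumed, not taken from real caps): max_upscale = 16000 (16x)
 * converts to min_scale = (1000 << 16) / 16000 = 4096, i.e. 1/16 in 16.16.
 */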
6921 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6922                                        struct drm_crtc_state *new_crtc_state)
6923 {
6924         struct drm_framebuffer *fb = state->fb;
6925         int min_downscale, max_upscale;
6926         int min_scale = 0;
6927         int max_scale = INT_MAX;
6928
6929         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6930         if (fb && state->crtc) {
6931                 /* Validate viewport to cover the case when only the position changes */
6932                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6933                         int viewport_width = state->crtc_w;
6934                         int viewport_height = state->crtc_h;
6935
6936                         if (state->crtc_x < 0)
6937                                 viewport_width += state->crtc_x;
6938                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6939                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6940
6941                         if (state->crtc_y < 0)
6942                                 viewport_height += state->crtc_y;
6943                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6944                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6945
6946                         if (viewport_width < 0 || viewport_height < 0) {
6947                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6948                                 return -EINVAL;
6949                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6950                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6951                                 return -EINVAL;
6952                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6953                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6954                                 return -EINVAL;
6955                         }
6956
6957                 }
6958
6959                 /* Get min/max allowed scaling factors from plane caps. */
6960                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6961                                              &min_downscale, &max_upscale);
6962                 /*
6963                  * Convert to drm convention: 16.16 fixed point, instead of dc's
6964                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6965                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
6966                  */
6967                 min_scale = (1000 << 16) / max_upscale;
6968                 max_scale = (1000 << 16) / min_downscale;
6969         }
6970
6971         return drm_atomic_helper_check_plane_state(
6972                 state, new_crtc_state, min_scale, max_scale, true, true);
6973 }
6974
6975 static int dm_plane_atomic_check(struct drm_plane *plane,
6976                                  struct drm_atomic_state *state)
6977 {
6978         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6979                                                                                  plane);
6980         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6981         struct dc *dc = adev->dm.dc;
6982         struct dm_plane_state *dm_plane_state;
6983         struct dc_scaling_info scaling_info;
6984         struct drm_crtc_state *new_crtc_state;
6985         int ret;
6986
6987         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6988
6989         dm_plane_state = to_dm_plane_state(new_plane_state);
6990
6991         if (!dm_plane_state->dc_state)
6992                 return 0;
6993
6994         new_crtc_state =
6995                 drm_atomic_get_new_crtc_state(state,
6996                                               new_plane_state->crtc);
6997         if (!new_crtc_state)
6998                 return -EINVAL;
6999
7000         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7001         if (ret)
7002                 return ret;
7003
7004         ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7005         if (ret)
7006                 return ret;
7007
7008         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7009                 return 0;
7010
7011         return -EINVAL;
7012 }
7013
7014 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7015                                        struct drm_atomic_state *state)
7016 {
7017         /* Only support async updates on cursor planes. */
7018         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7019                 return -EINVAL;
7020
7021         return 0;
7022 }
7023
7024 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7025                                          struct drm_atomic_state *state)
7026 {
7027         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7028                                                                            plane);
7029         struct drm_plane_state *old_state =
7030                 drm_atomic_get_old_plane_state(state, plane);
7031
7032         trace_amdgpu_dm_atomic_update_cursor(new_state);
7033
7034         swap(plane->state->fb, new_state->fb);
7035
7036         plane->state->src_x = new_state->src_x;
7037         plane->state->src_y = new_state->src_y;
7038         plane->state->src_w = new_state->src_w;
7039         plane->state->src_h = new_state->src_h;
7040         plane->state->crtc_x = new_state->crtc_x;
7041         plane->state->crtc_y = new_state->crtc_y;
7042         plane->state->crtc_w = new_state->crtc_w;
7043         plane->state->crtc_h = new_state->crtc_h;
7044
7045         handle_cursor_update(plane, old_state);
7046 }
7047
7048 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7049         .prepare_fb = dm_plane_helper_prepare_fb,
7050         .cleanup_fb = dm_plane_helper_cleanup_fb,
7051         .atomic_check = dm_plane_atomic_check,
7052         .atomic_async_check = dm_plane_atomic_async_check,
7053         .atomic_async_update = dm_plane_atomic_async_update
7054 };
7055
7056 /*
7057  * TODO: these are currently initialized to RGB formats only.
7058  * For future use cases we should either initialize them dynamically based on
7059  * plane capabilities, or initialize this array to all formats, so the internal
7060  * drm check will succeed, and let DC implement the proper check.
7061  */
7062 static const uint32_t rgb_formats[] = {
7063         DRM_FORMAT_XRGB8888,
7064         DRM_FORMAT_ARGB8888,
7065         DRM_FORMAT_RGBA8888,
7066         DRM_FORMAT_XRGB2101010,
7067         DRM_FORMAT_XBGR2101010,
7068         DRM_FORMAT_ARGB2101010,
7069         DRM_FORMAT_ABGR2101010,
7070         DRM_FORMAT_XBGR8888,
7071         DRM_FORMAT_ABGR8888,
7072         DRM_FORMAT_RGB565,
7073 };
7074
7075 static const uint32_t overlay_formats[] = {
7076         DRM_FORMAT_XRGB8888,
7077         DRM_FORMAT_ARGB8888,
7078         DRM_FORMAT_RGBA8888,
7079         DRM_FORMAT_XBGR8888,
7080         DRM_FORMAT_ABGR8888,
7081         DRM_FORMAT_RGB565
7082 };
7083
7084 static const u32 cursor_formats[] = {
7085         DRM_FORMAT_ARGB8888
7086 };
7087
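/*
 * Fill @formats with the format list for @plane's type, up to @max_formats
 * entries, appending NV12/P010/FP16 variants on primary planes when the DC
 * plane caps advertise them. Returns the number of formats written.
 */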
7088 static int get_plane_formats(const struct drm_plane *plane,
7089                              const struct dc_plane_cap *plane_cap,
7090                              uint32_t *formats, int max_formats)
7091 {
7092         int i, num_formats = 0;
7093
7094         /*
7095          * TODO: Query support for each group of formats directly from
7096          * DC plane caps. This will require adding more formats to the
7097          * caps list.
7098          */
7099
7100         switch (plane->type) {
7101         case DRM_PLANE_TYPE_PRIMARY:
7102                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7103                         if (num_formats >= max_formats)
7104                                 break;
7105
7106                         formats[num_formats++] = rgb_formats[i];
7107                 }
7108
7109                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7110                         formats[num_formats++] = DRM_FORMAT_NV12;
7111                 if (plane_cap && plane_cap->pixel_format_support.p010)
7112                         formats[num_formats++] = DRM_FORMAT_P010;
7113                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7114                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7115                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7116                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7117                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7118                 }
7119                 break;
7120
7121         case DRM_PLANE_TYPE_OVERLAY:
7122                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7123                         if (num_formats >= max_formats)
7124                                 break;
7125
7126                         formats[num_formats++] = overlay_formats[i];
7127                 }
7128                 break;
7129
7130         case DRM_PLANE_TYPE_CURSOR:
7131                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7132                         if (num_formats >= max_formats)
7133                                 break;
7134
7135                         formats[num_formats++] = cursor_formats[i];
7136                 }
7137                 break;
7138         }
7139
7140         return num_formats;
7141 }
7142
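/*
 * Register one DRM plane: query its formats and modifiers, initialize it as
 * a universal plane, and attach the blending, color-encoding and rotation
 * properties that the DC plane caps and ASIC generation allow.
 */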
7143 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7144                                 struct drm_plane *plane,
7145                                 unsigned long possible_crtcs,
7146                                 const struct dc_plane_cap *plane_cap)
7147 {
7148         uint32_t formats[32];
7149         int num_formats;
7150         int res = -EPERM;
7151         unsigned int supported_rotations;
7152         uint64_t *modifiers = NULL;
7153
7154         num_formats = get_plane_formats(plane, plane_cap, formats,
7155                                         ARRAY_SIZE(formats));
7156
7157         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7158         if (res)
7159                 return res;
7160
7161         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7162                                        &dm_plane_funcs, formats, num_formats,
7163                                        modifiers, plane->type, NULL);
7164         kfree(modifiers);
7165         if (res)
7166                 return res;
7167
7168         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7169             plane_cap && plane_cap->per_pixel_alpha) {
7170                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7171                                           BIT(DRM_MODE_BLEND_PREMULTI);
7172
7173                 drm_plane_create_alpha_property(plane);
7174                 drm_plane_create_blend_mode_property(plane, blend_caps);
7175         }
7176
7177         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7178             plane_cap &&
7179             (plane_cap->pixel_format_support.nv12 ||
7180              plane_cap->pixel_format_support.p010)) {
7181                 /* This only affects YUV formats. */
7182                 drm_plane_create_color_properties(
7183                         plane,
7184                         BIT(DRM_COLOR_YCBCR_BT601) |
7185                         BIT(DRM_COLOR_YCBCR_BT709) |
7186                         BIT(DRM_COLOR_YCBCR_BT2020),
7187                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7188                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7189                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7190         }
7191
7192         supported_rotations =
7193                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7194                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7195
7196         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7197             plane->type != DRM_PLANE_TYPE_CURSOR)
7198                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7199                                                    supported_rotations);
7200
7201         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7202
7203         /* Create (reset) the plane state */
7204         if (plane->funcs->reset)
7205                 plane->funcs->reset(plane);
7206
7207         return 0;
7208 }
7209
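/*
 * Create one CRTC with a dedicated cursor plane on top of the given primary
 * plane, then wire up color management and the legacy gamma LUT sizes.
 */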
7210 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7211                                struct drm_plane *plane,
7212                                uint32_t crtc_index)
7213 {
7214         struct amdgpu_crtc *acrtc = NULL;
7215         struct drm_plane *cursor_plane;
7216
7217         int res = -ENOMEM;
7218
7219         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7220         if (!cursor_plane)
7221                 goto fail;
7222
7223         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7224         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7225
7226         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7227         if (!acrtc)
7228                 goto fail;
7229
7230         res = drm_crtc_init_with_planes(
7231                         dm->ddev,
7232                         &acrtc->base,
7233                         plane,
7234                         cursor_plane,
7235                         &amdgpu_dm_crtc_funcs, NULL);
7236
7237         if (res)
7238                 goto fail;
7239
7240         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7241
7242         /* Create (reset) the CRTC state */
7243         if (acrtc->base.funcs->reset)
7244                 acrtc->base.funcs->reset(&acrtc->base);
7245
7246         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7247         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7248
7249         acrtc->crtc_id = crtc_index;
7250         acrtc->base.enabled = false;
7251         acrtc->otg_inst = -1;
7252
7253         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7254         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7255                                    true, MAX_COLOR_LUT_ENTRIES);
7256         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7257
7258         return 0;
7259
7260 fail:
7261         kfree(acrtc);
7262         kfree(cursor_plane);
7263         return res;
7264 }
7265
7266
7267 static int to_drm_connector_type(enum signal_type st)
7268 {
7269         switch (st) {
7270         case SIGNAL_TYPE_HDMI_TYPE_A:
7271                 return DRM_MODE_CONNECTOR_HDMIA;
7272         case SIGNAL_TYPE_EDP:
7273                 return DRM_MODE_CONNECTOR_eDP;
7274         case SIGNAL_TYPE_LVDS:
7275                 return DRM_MODE_CONNECTOR_LVDS;
7276         case SIGNAL_TYPE_RGB:
7277                 return DRM_MODE_CONNECTOR_VGA;
7278         case SIGNAL_TYPE_DISPLAY_PORT:
7279         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7280                 return DRM_MODE_CONNECTOR_DisplayPort;
7281         case SIGNAL_TYPE_DVI_DUAL_LINK:
7282         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7283                 return DRM_MODE_CONNECTOR_DVID;
7284         case SIGNAL_TYPE_VIRTUAL:
7285                 return DRM_MODE_CONNECTOR_VIRTUAL;
7286
7287         default:
7288                 return DRM_MODE_CONNECTOR_Unknown;
7289         }
7290 }
7291
7292 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7293 {
7294         struct drm_encoder *encoder;
7295
7296         /* There is only one encoder per connector */
7297         drm_connector_for_each_possible_encoder(connector, encoder)
7298                 return encoder;
7299
7300         return NULL;
7301 }
7302
7303 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7304 {
7305         struct drm_encoder *encoder;
7306         struct amdgpu_encoder *amdgpu_encoder;
7307
7308         encoder = amdgpu_dm_connector_to_encoder(connector);
7309
7310         if (encoder == NULL)
7311                 return;
7312
7313         amdgpu_encoder = to_amdgpu_encoder(encoder);
7314
7315         amdgpu_encoder->native_mode.clock = 0;
7316
7317         if (!list_empty(&connector->probed_modes)) {
7318                 struct drm_display_mode *preferred_mode = NULL;
7319
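                /*
                 * Only the first entry is examined: probed_modes was sorted
                 * by the caller, which should place a preferred mode first,
                 * so the unconditional break below is intentional.
                 */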
7320                 list_for_each_entry(preferred_mode,
7321                                     &connector->probed_modes,
7322                                     head) {
7323                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7324                                 amdgpu_encoder->native_mode = *preferred_mode;
7325
7326                         break;
7327                 }
7328
7329         }
7330 }
7331
7332 static struct drm_display_mode *
7333 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7334                              char *name,
7335                              int hdisplay, int vdisplay)
7336 {
7337         struct drm_device *dev = encoder->dev;
7338         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7339         struct drm_display_mode *mode = NULL;
7340         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7341
7342         mode = drm_mode_duplicate(dev, native_mode);
7343
7344         if (mode == NULL)
7345                 return NULL;
7346
7347         mode->hdisplay = hdisplay;
7348         mode->vdisplay = vdisplay;
7349         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7350         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7351
7352         return mode;
7353
7354 }
7355
7356 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7357                                                  struct drm_connector *connector)
7358 {
7359         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7360         struct drm_display_mode *mode = NULL;
7361         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7362         struct amdgpu_dm_connector *amdgpu_dm_connector =
7363                                 to_amdgpu_dm_connector(connector);
7364         int i;
7365         int n;
7366         struct mode_size {
7367                 char name[DRM_DISPLAY_MODE_LEN];
7368                 int w;
7369                 int h;
7370         } common_modes[] = {
7371                 {  "640x480",  640,  480},
7372                 {  "800x600",  800,  600},
7373                 { "1024x768", 1024,  768},
7374                 { "1280x720", 1280,  720},
7375                 { "1280x800", 1280,  800},
7376                 {"1280x1024", 1280, 1024},
7377                 { "1440x900", 1440,  900},
7378                 {"1680x1050", 1680, 1050},
7379                 {"1600x1200", 1600, 1200},
7380                 {"1920x1080", 1920, 1080},
7381                 {"1920x1200", 1920, 1200}
7382         };
7383
7384         n = ARRAY_SIZE(common_modes);
7385
7386         for (i = 0; i < n; i++) {
7387                 struct drm_display_mode *curmode = NULL;
7388                 bool mode_existed = false;
7389
7390                 if (common_modes[i].w > native_mode->hdisplay ||
7391                     common_modes[i].h > native_mode->vdisplay ||
7392                    (common_modes[i].w == native_mode->hdisplay &&
7393                     common_modes[i].h == native_mode->vdisplay))
7394                         continue;
7395
7396                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7397                         if (common_modes[i].w == curmode->hdisplay &&
7398                             common_modes[i].h == curmode->vdisplay) {
7399                                 mode_existed = true;
7400                                 break;
7401                         }
7402                 }
7403
7404                 if (mode_existed)
7405                         continue;
7406
7407                 mode = amdgpu_dm_create_common_mode(encoder,
7408                                 common_modes[i].name, common_modes[i].w,
7409                                 common_modes[i].h);
7410                 drm_mode_probed_add(connector, mode);
7411                 amdgpu_dm_connector->num_modes++;
7412         }
7413 }
7414
7415 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7416                                               struct edid *edid)
7417 {
7418         struct amdgpu_dm_connector *amdgpu_dm_connector =
7419                         to_amdgpu_dm_connector(connector);
7420
7421         if (edid) {
7422                 /* empty probed_modes */
7423                 INIT_LIST_HEAD(&connector->probed_modes);
7424                 amdgpu_dm_connector->num_modes =
7425                                 drm_add_edid_modes(connector, edid);
7426
7427                 /* Sort the probed modes before calling
7428                  * amdgpu_dm_get_native_mode(), since an EDID can contain
7429                  * more than one preferred mode. Modes later in the probed
7430                  * list may have a higher preferred resolution: for
7431                  * example, 3840x2160 in the base EDID preferred timing,
7432                  * followed by a preferred 4096x2160 in a DisplayID
7433                  * extension block.
7434                  */
7435                 drm_mode_sort(&connector->probed_modes);
7436                 amdgpu_dm_get_native_mode(connector);
7437
7438                 /* Freesync capabilities are reset by calling
7439                  * drm_add_edid_modes() and need to be
7440                  * restored here.
7441                  */
7442                 amdgpu_dm_update_freesync_caps(connector, edid);
7443         } else {
7444                 amdgpu_dm_connector->num_modes = 0;
7445         }
7446 }
7447
7448 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7449                               struct drm_display_mode *mode)
7450 {
7451         struct drm_display_mode *m;
7452
7453         list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7454                 if (drm_mode_equal(m, mode))
7455                         return true;
7456         }
7457
7458         return false;
7459 }
7460
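/*
 * Synthesize additional fixed-refresh modes inside the connector's FreeSync
 * range by stretching the vertical blank of the highest-refresh probed mode.
 * Worked example with standard CEA 1920x1080@60 timing (clock 148500 kHz,
 * htotal 2200, vtotal 1125): a 48 Hz target gives
 * target_vtotal = 148500 * 1000 * 1000 / (48000 * 2200) = 1406,
 * so vtotal grows by 281 lines while the pixel clock stays unchanged.
 */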
7461 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7462 {
7463         const struct drm_display_mode *m;
7464         struct drm_display_mode *new_mode;
7465         uint i;
7466         uint32_t new_modes_count = 0;
7467
7468         /* Standard FPS values
7469          *
7470          * 23.976   - TV/NTSC
7471          * 24       - Cinema
7472          * 25       - TV/PAL
7473          * 29.97    - TV/NTSC
7474          * 30       - TV/NTSC
7475          * 48       - Cinema HFR
7476          * 50       - TV/PAL
7477          * 60       - Commonly used
7478          * 48,72,96 - Multiples of 24
7479          */
7480         const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7481                                          48000, 50000, 60000, 72000, 96000 };
7482
7483         /*
7484          * Find the mode with the highest refresh rate at the same resolution
7485          * as the preferred mode; some monitors report a preferred mode whose
7486          * refresh rate is lower than the highest one they support.
7487          */
7488
7489         m = get_highest_refresh_rate_mode(aconnector, true);
7490         if (!m)
7491                 return 0;
7492
7493         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7494                 uint64_t target_vtotal, target_vtotal_diff;
7495                 uint64_t num, den;
7496
7497                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7498                         continue;
7499
7500                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7501                     common_rates[i] > aconnector->max_vfreq * 1000)
7502                         continue;
7503
7504                 num = (unsigned long long)m->clock * 1000 * 1000;
7505                 den = common_rates[i] * (unsigned long long)m->htotal;
7506                 target_vtotal = div_u64(num, den);
7507                 target_vtotal_diff = target_vtotal - m->vtotal;
7508
7509                 /* Check for illegal modes */
7510                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7511                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
7512                     m->vtotal + target_vtotal_diff < m->vsync_end)
7513                         continue;
7514
7515                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7516                 if (!new_mode)
7517                         goto out;
7518
7519                 new_mode->vtotal += (u16)target_vtotal_diff;
7520                 new_mode->vsync_start += (u16)target_vtotal_diff;
7521                 new_mode->vsync_end += (u16)target_vtotal_diff;
7522                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7523                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7524
7525                 if (!is_duplicate_mode(aconnector, new_mode)) {
7526                         drm_mode_probed_add(&aconnector->base, new_mode);
7527                         new_modes_count += 1;
7528                 } else
7529                         drm_mode_destroy(aconnector->base.dev, new_mode);
7530         }
7531  out:
7532         return new_modes_count;
7533 }
7534
7535 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7536                                                    struct edid *edid)
7537 {
7538         struct amdgpu_dm_connector *amdgpu_dm_connector =
7539                 to_amdgpu_dm_connector(connector);
7540
7541         if (!(amdgpu_freesync_vid_mode && edid))
7542                 return;
7543
7544         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7545                 amdgpu_dm_connector->num_modes +=
7546                         add_fs_modes(amdgpu_dm_connector);
7547 }
7548
7549 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7550 {
7551         struct amdgpu_dm_connector *amdgpu_dm_connector =
7552                         to_amdgpu_dm_connector(connector);
7553         struct drm_encoder *encoder;
7554         struct edid *edid = amdgpu_dm_connector->edid;
7555
7556         encoder = amdgpu_dm_connector_to_encoder(connector);
7557
7558         if (!drm_edid_is_valid(edid)) {
7559                 amdgpu_dm_connector->num_modes =
7560                                 drm_add_modes_noedid(connector, 640, 480);
7561         } else {
7562                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7563                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7564                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7565         }
7566         amdgpu_dm_fbc_init(connector);
7567
7568         return amdgpu_dm_connector->num_modes;
7569 }
7570
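/*
 * Common connector setup shared by SST and MST paths: reset the initial
 * connector state, set polling and YCbCr 4:2:0 capabilities per connector
 * type, and attach the scaling, underscan, max bpc, ABM, HDR metadata, VRR
 * and (optionally) HDCP content protection properties.
 */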
7571 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7572                                      struct amdgpu_dm_connector *aconnector,
7573                                      int connector_type,
7574                                      struct dc_link *link,
7575                                      int link_index)
7576 {
7577         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7578
7579         /*
7580          * Some of the properties below require access to state, like bpc.
7581          * Allocate some default initial connector state with our reset helper.
7582          */
7583         if (aconnector->base.funcs->reset)
7584                 aconnector->base.funcs->reset(&aconnector->base);
7585
7586         aconnector->connector_id = link_index;
7587         aconnector->dc_link = link;
7588         aconnector->base.interlace_allowed = false;
7589         aconnector->base.doublescan_allowed = false;
7590         aconnector->base.stereo_allowed = false;
7591         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7592         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7593         aconnector->audio_inst = -1;
7594         mutex_init(&aconnector->hpd_lock);
7595
7596         /*
7597          * Configure HPD hot-plug support: connector->polled defaults to 0,
7598          * which means HPD hot plug is not supported.
7599          */
7600         switch (connector_type) {
7601         case DRM_MODE_CONNECTOR_HDMIA:
7602                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7603                 aconnector->base.ycbcr_420_allowed =
7604                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7605                 break;
7606         case DRM_MODE_CONNECTOR_DisplayPort:
7607                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7608                 aconnector->base.ycbcr_420_allowed =
7609                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
7610                 break;
7611         case DRM_MODE_CONNECTOR_DVID:
7612                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7613                 break;
7614         default:
7615                 break;
7616         }
7617
7618         drm_object_attach_property(&aconnector->base.base,
7619                                 dm->ddev->mode_config.scaling_mode_property,
7620                                 DRM_MODE_SCALE_NONE);
7621
7622         drm_object_attach_property(&aconnector->base.base,
7623                                 adev->mode_info.underscan_property,
7624                                 UNDERSCAN_OFF);
7625         drm_object_attach_property(&aconnector->base.base,
7626                                 adev->mode_info.underscan_hborder_property,
7627                                 0);
7628         drm_object_attach_property(&aconnector->base.base,
7629                                 adev->mode_info.underscan_vborder_property,
7630                                 0);
7631
7632         if (!aconnector->mst_port)
7633                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7634
7635         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7636         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7637         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7638
7639         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7640             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7641                 drm_object_attach_property(&aconnector->base.base,
7642                                 adev->mode_info.abm_level_property, 0);
7643         }
7644
7645         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7646             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7647             connector_type == DRM_MODE_CONNECTOR_eDP) {
7648                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7649
7650                 if (!aconnector->mst_port)
7651                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7652
7653 #ifdef CONFIG_DRM_AMD_DC_HDCP
7654                 if (adev->dm.hdcp_workqueue)
7655                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7656 #endif
7657         }
7658 }
7659
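/*
 * i2c_algorithm.master_xfer hook: translate the i2c_msg array into a DC
 * i2c_command and submit it over the link's DDC channel. Returns the number
 * of messages transferred on success, or -EIO if the payload allocation or
 * the DC submission fails.
 */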
7660 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7661                               struct i2c_msg *msgs, int num)
7662 {
7663         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7664         struct ddc_service *ddc_service = i2c->ddc_service;
7665         struct i2c_command cmd;
7666         int i;
7667         int result = -EIO;
7668
7669         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7670
7671         if (!cmd.payloads)
7672                 return result;
7673
7674         cmd.number_of_payloads = num;
7675         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7676         cmd.speed = 100;
7677
7678         for (i = 0; i < num; i++) {
7679                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7680                 cmd.payloads[i].address = msgs[i].addr;
7681                 cmd.payloads[i].length = msgs[i].len;
7682                 cmd.payloads[i].data = msgs[i].buf;
7683         }
7684
7685         if (dc_submit_i2c(
7686                         ddc_service->ctx->dc,
7687                         ddc_service->ddc_pin->hw_info.ddc_channel,
7688                         &cmd))
7689                 result = num;
7690
7691         kfree(cmd.payloads);
7692         return result;
7693 }
7694
7695 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7696 {
7697         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7698 }
7699
7700 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7701         .master_xfer = amdgpu_dm_i2c_xfer,
7702         .functionality = amdgpu_dm_i2c_func,
7703 };
7704
7705 static struct amdgpu_i2c_adapter *
7706 create_i2c(struct ddc_service *ddc_service,
7707            int link_index,
7708            int *res)
7709 {
7710         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7711         struct amdgpu_i2c_adapter *i2c;
7712
7713         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7714         if (!i2c)
7715                 return NULL;
7716         i2c->base.owner = THIS_MODULE;
7717         i2c->base.class = I2C_CLASS_DDC;
7718         i2c->base.dev.parent = &adev->pdev->dev;
7719         i2c->base.algo = &amdgpu_dm_i2c_algo;
7720         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7721         i2c_set_adapdata(&i2c->base, i2c);
7722         i2c->ddc_service = ddc_service;
7723         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7724
7725         return i2c;
7726 }
7727
7728
7729 /*
7730  * Note: this function assumes that dc_link_detect() was called for the
7731  * dc_link which will be represented by this aconnector.
7732  */
7733 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7734                                     struct amdgpu_dm_connector *aconnector,
7735                                     uint32_t link_index,
7736                                     struct amdgpu_encoder *aencoder)
7737 {
7738         int res = 0;
7739         int connector_type;
7740         struct dc *dc = dm->dc;
7741         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7742         struct amdgpu_i2c_adapter *i2c;
7743
7744         link->priv = aconnector;
7745
7746         DRM_DEBUG_DRIVER("%s()\n", __func__);
7747
7748         i2c = create_i2c(link->ddc, link->link_index, &res);
7749         if (!i2c) {
7750                 DRM_ERROR("Failed to create i2c adapter data\n");
7751                 return -ENOMEM;
7752         }
7753
7754         aconnector->i2c = i2c;
7755         res = i2c_add_adapter(&i2c->base);
7756
7757         if (res) {
7758                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7759                 goto out_free;
7760         }
7761
7762         connector_type = to_drm_connector_type(link->connector_signal);
7763
7764         res = drm_connector_init_with_ddc(
7765                         dm->ddev,
7766                         &aconnector->base,
7767                         &amdgpu_dm_connector_funcs,
7768                         connector_type,
7769                         &i2c->base);
7770
7771         if (res) {
7772                 DRM_ERROR("connector_init failed\n");
7773                 aconnector->connector_id = -1;
7774                 goto out_free;
7775         }
7776
7777         drm_connector_helper_add(
7778                         &aconnector->base,
7779                         &amdgpu_dm_connector_helper_funcs);
7780
7781         amdgpu_dm_connector_init_helper(
7782                 dm,
7783                 aconnector,
7784                 connector_type,
7785                 link,
7786                 link_index);
7787
7788         drm_connector_attach_encoder(
7789                 &aconnector->base, &aencoder->base);
7790
7791         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7792                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7793                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7794
7795 out_free:
7796         if (res) {
7797                 kfree(i2c);
7798                 aconnector->i2c = NULL;
7799         }
7800         return res;
7801 }
7802
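/*
 * Build the possible_crtcs bitmask for an encoder: one bit per CRTC, so for
 * example 4 CRTCs yields 0xf; the default case returns 0x3f.
 */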
7803 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7804 {
7805         switch (adev->mode_info.num_crtc) {
7806         case 1:
7807                 return 0x1;
7808         case 2:
7809                 return 0x3;
7810         case 3:
7811                 return 0x7;
7812         case 4:
7813                 return 0xf;
7814         case 5:
7815                 return 0x1f;
7816         case 6:
7817         default:
7818                 return 0x3f;
7819         }
7820 }
7821
7822 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7823                                   struct amdgpu_encoder *aencoder,
7824                                   uint32_t link_index)
7825 {
7826         struct amdgpu_device *adev = drm_to_adev(dev);
7827
7828         int res = drm_encoder_init(dev,
7829                                    &aencoder->base,
7830                                    &amdgpu_dm_encoder_funcs,
7831                                    DRM_MODE_ENCODER_TMDS,
7832                                    NULL);
7833
7834         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7835
7836         if (!res)
7837                 aencoder->encoder_id = link_index;
7838         else
7839                 aencoder->encoder_id = -1;
7840
7841         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7842
7843         return res;
7844 }
7845
7846 static void manage_dm_interrupts(struct amdgpu_device *adev,
7847                                  struct amdgpu_crtc *acrtc,
7848                                  bool enable)
7849 {
7850         /*
7851          * We have no guarantee that the frontend index maps to the same
7852          * backend index - some even map to more than one.
7853          *
7854          * TODO: Use a different interrupt or check DC itself for the mapping.
7855          */
7856         int irq_type =
7857                 amdgpu_display_crtc_idx_to_irq_type(
7858                         adev,
7859                         acrtc->crtc_id);
7860
7861         if (enable) {
7862                 drm_crtc_vblank_on(&acrtc->base);
7863                 amdgpu_irq_get(
7864                         adev,
7865                         &adev->pageflip_irq,
7866                         irq_type);
7867 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7868                 amdgpu_irq_get(
7869                         adev,
7870                         &adev->vline0_irq,
7871                         irq_type);
7872 #endif
7873         } else {
7874 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7875                 amdgpu_irq_put(
7876                         adev,
7877                         &adev->vline0_irq,
7878                         irq_type);
7879 #endif
7880                 amdgpu_irq_put(
7881                         adev,
7882                         &adev->pageflip_irq,
7883                         irq_type);
7884                 drm_crtc_vblank_off(&acrtc->base);
7885         }
7886 }
7887
7888 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7889                                       struct amdgpu_crtc *acrtc)
7890 {
7891         int irq_type =
7892                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7893
7894         /*
7895          * This reads the current state for the IRQ and forces a reapply of
7896          * the setting to hardware.
7897          */
7898         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7899 }
7900
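/*
 * Compare two connector states and report whether the scaling mode or the
 * effective underscan borders differ.
 */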
7901 static bool
7902 is_scaling_state_different(const struct dm_connector_state *dm_state,
7903                            const struct dm_connector_state *old_dm_state)
7904 {
7905         if (dm_state->scaling != old_dm_state->scaling)
7906                 return true;
7907         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7908                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7909                         return true;
7910         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7911                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7912                         return true;
7913         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7914                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7915                 return true;
7916         return false;
7917 }
7918
7919 #ifdef CONFIG_DRM_AMD_DC_HDCP
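/*
 * Decide whether the requested content protection state differs from the old
 * one enough that HDCP must be (re)enabled or disabled. Several transitions
 * (ENABLED -> DESIRED re-enable, UNDESIRED -> ENABLED on S3 resume) are
 * normalized in place on @state before the comparison; see the per-case
 * comments below.
 */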
7920 static bool is_content_protection_different(struct drm_connector_state *state,
7921                                             const struct drm_connector_state *old_state,
7922                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7923 {
7924         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7925         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7926
7927         /* Handle: Type0/1 change */
7928         if (old_state->hdcp_content_type != state->hdcp_content_type &&
7929             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7930                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7931                 return true;
7932         }
7933
7934         /* CP is being re-enabled; ignore this.
7935          *
7936          * Handles:     ENABLED -> DESIRED
7937          */
7938         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7939             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7940                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7941                 return false;
7942         }
7943
7944         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
7945          *
7946          * Handles:     UNDESIRED -> ENABLED
7947          */
7948         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7949             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7950                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7951
7952         /* Check that something is connected and enabled; otherwise we would start
7953          * HDCP with nothing connected/enabled (hot plug, headless S3, DPMS).
7954          *
7955          * Handles:     DESIRED -> DESIRED (Special case)
7956          */
7957         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7958             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7959                 dm_con_state->update_hdcp = false;
7960                 return true;
7961         }
7962
7963         /*
7964          * Handles:     UNDESIRED -> UNDESIRED
7965          *              DESIRED -> DESIRED
7966          *              ENABLED -> ENABLED
7967          */
7968         if (old_state->content_protection == state->content_protection)
7969                 return false;
7970
7971         /*
7972          * Handles:     UNDESIRED -> DESIRED
7973          *              DESIRED -> UNDESIRED
7974          *              ENABLED -> UNDESIRED
7975          */
7976         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7977                 return true;
7978
7979         /*
7980          * Handles:     DESIRED -> ENABLED
7981          */
7982         return false;
7983 }
7984
7985 #endif
7986 static void remove_stream(struct amdgpu_device *adev,
7987                           struct amdgpu_crtc *acrtc,
7988                           struct dc_stream_state *stream)
7989 {
7990         /* This is the mode-update case: disable the CRTC and detach it from its OTG. */
7991
7992         acrtc->otg_inst = -1;
7993         acrtc->enabled = false;
7994 }
7995
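/*
 * Compute the DC cursor position from the plane state. Positions partially
 * off the top/left edge are clamped to 0 with the overhang folded into the
 * hotspot, and a cursor fully outside the CRTC leaves position->enable false
 * so the caller turns the cursor off.
 */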
7996 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7997                                struct dc_cursor_position *position)
7998 {
7999         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8000         int x, y;
8001         int xorigin = 0, yorigin = 0;
8002
8003         if (!crtc || !plane->state->fb)
8004                 return 0;
8005
8006         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8007             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8008                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8009                           __func__,
8010                           plane->state->crtc_w,
8011                           plane->state->crtc_h);
8012                 return -EINVAL;
8013         }
8014
8015         x = plane->state->crtc_x;
8016         y = plane->state->crtc_y;
8017
8018         if (x <= -amdgpu_crtc->max_cursor_width ||
8019             y <= -amdgpu_crtc->max_cursor_height)
8020                 return 0;
8021
8022         if (x < 0) {
8023                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8024                 x = 0;
8025         }
8026         if (y < 0) {
8027                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8028                 y = 0;
8029         }
8030         position->enable = true;
8031         position->translate_by_source = true;
8032         position->x = x;
8033         position->y = y;
8034         position->x_hotspot = xorigin;
8035         position->y_hotspot = yorigin;
8036
8037         return 0;
8038 }
8039
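/*
 * Program the hardware cursor for a (possibly async) plane update: compute
 * the new position, and if the cursor is visible push both its attributes
 * (address, size, pre-multiplied-alpha format) and its position to DC under
 * dc_lock.
 */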
8040 static void handle_cursor_update(struct drm_plane *plane,
8041                                  struct drm_plane_state *old_plane_state)
8042 {
8043         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8044         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8045         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8046         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8047         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8048         uint64_t address = afb ? afb->address : 0;
8049         struct dc_cursor_position position = {0};
8050         struct dc_cursor_attributes attributes;
8051         int ret;
8052
8053         if (!plane->state->fb && !old_plane_state->fb)
8054                 return;
8055
8056         DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8057                       __func__,
8058                       amdgpu_crtc->crtc_id,
8059                       plane->state->crtc_w,
8060                       plane->state->crtc_h);
8061
8062         ret = get_cursor_position(plane, crtc, &position);
8063         if (ret)
8064                 return;
8065
8066         if (!position.enable) {
8067                 /* turn off cursor */
8068                 if (crtc_state && crtc_state->stream) {
8069                         mutex_lock(&adev->dm.dc_lock);
8070                         dc_stream_set_cursor_position(crtc_state->stream,
8071                                                       &position);
8072                         mutex_unlock(&adev->dm.dc_lock);
8073                 }
8074                 return;
8075         }
8076
8077         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8078         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8079
8080         memset(&attributes, 0, sizeof(attributes));
8081         attributes.address.high_part = upper_32_bits(address);
8082         attributes.address.low_part  = lower_32_bits(address);
8083         attributes.width             = plane->state->crtc_w;
8084         attributes.height            = plane->state->crtc_h;
8085         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8086         attributes.rotation_angle    = 0;
8087         attributes.attribute_flags.value = 0;
8088
8089         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8090
8091         if (crtc_state->stream) {
8092                 mutex_lock(&adev->dm.dc_lock);
8093                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8094                                                          &attributes))
8095                         DRM_ERROR("DC failed to set cursor attributes\n");
8096
8097                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8098                                                    &position))
8099                         DRM_ERROR("DC failed to set cursor position\n");
8100                 mutex_unlock(&adev->dm.dc_lock);
8101         }
8102 }
8103
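/*
 * Arm the page-flip completion: transfer ownership of the CRTC's pending
 * event to the amdgpu_crtc and mark the flip SUBMITTED so the pflip IRQ
 * handler can send the event. Must be called with event_lock held.
 */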
8104 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8105 {
8106
8107         assert_spin_locked(&acrtc->base.dev->event_lock);
8108         WARN_ON(acrtc->event);
8109
8110         acrtc->event = acrtc->base.state->event;
8111
8112         /* Set the flip status */
8113         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8114
8115         /* Mark this event as consumed */
8116         acrtc->base.state->event = NULL;
8117
8118         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8119                      acrtc->crtc_id);
8120 }
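/*
 * A condensed sketch of the pageflip event lifecycle this helper
 * participates in (a simplified view, not additional driver code):
 *
 *	acrtc->event = acrtc->base.state->event;	// take ownership
 *	acrtc->base.state->event = NULL;		// mark consumed
 *	// ... the flip is programmed; the pflip IRQ handler later
 *	// delivers acrtc->event to userspace and resets pflip_status
 *	// back to AMDGPU_FLIP_NONE.
 */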
8121
8122 static void update_freesync_state_on_stream(
8123         struct amdgpu_display_manager *dm,
8124         struct dm_crtc_state *new_crtc_state,
8125         struct dc_stream_state *new_stream,
8126         struct dc_plane_state *surface,
8127         u32 flip_timestamp_in_us)
8128 {
8129         struct mod_vrr_params vrr_params;
8130         struct dc_info_packet vrr_infopacket = {0};
8131         struct amdgpu_device *adev = dm->adev;
8132         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8133         unsigned long flags;
8134         bool pack_sdp_v1_3 = false;
8135
8136         if (!new_stream)
8137                 return;
8138
8139         /*
8140          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8141          * For now it's sufficient to just guard against these conditions.
8142          */
8143
8144         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8145                 return;
8146
8147         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8148         vrr_params = acrtc->dm_irq_params.vrr_params;
8149
8150         if (surface) {
8151                 mod_freesync_handle_preflip(
8152                         dm->freesync_module,
8153                         surface,
8154                         new_stream,
8155                         flip_timestamp_in_us,
8156                         &vrr_params);
8157
8158                 if (adev->family < AMDGPU_FAMILY_AI &&
8159                     amdgpu_dm_vrr_active(new_crtc_state)) {
8160                         mod_freesync_handle_v_update(dm->freesync_module,
8161                                                      new_stream, &vrr_params);
8162
8163                         /* Need to call this before the frame ends. */
8164                         dc_stream_adjust_vmin_vmax(dm->dc,
8165                                                    new_crtc_state->stream,
8166                                                    &vrr_params.adjust);
8167                 }
8168         }
8169
8170         mod_freesync_build_vrr_infopacket(
8171                 dm->freesync_module,
8172                 new_stream,
8173                 &vrr_params,
8174                 PACKET_TYPE_VRR,
8175                 TRANSFER_FUNC_UNKNOWN,
8176                 &vrr_infopacket,
8177                 pack_sdp_v1_3);
8178
8179         new_crtc_state->freesync_timing_changed |=
8180                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8181                         &vrr_params.adjust,
8182                         sizeof(vrr_params.adjust)) != 0);
8183
8184         new_crtc_state->freesync_vrr_info_changed |=
8185                 (memcmp(&new_crtc_state->vrr_infopacket,
8186                         &vrr_infopacket,
8187                         sizeof(vrr_infopacket)) != 0);
8188
8189         acrtc->dm_irq_params.vrr_params = vrr_params;
8190         new_crtc_state->vrr_infopacket = vrr_infopacket;
8191
8192         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8193         new_stream->vrr_infopacket = vrr_infopacket;
8194
8195         if (new_crtc_state->freesync_vrr_info_changed)
8196                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
8197                               new_crtc_state->base.crtc->base.id,
8198                               (int)new_crtc_state->base.vrr_enabled,
8199                               (int)vrr_params.state);
8200
8201         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8202 }
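/*
 * For reference, DC varies the effective refresh rate by stretching
 * the vertical total between vrr_params.adjust.v_total_min/max:
 *
 *	refresh_hz = pixel_clock_hz / (h_total * v_total)
 *
 * A worked example with illustrative numbers: a 1920x1080 mode with a
 * 148.5 MHz pixel clock and h_total = 2200 gives ~60 Hz at
 * v_total = 1125; stretching v_total to ~1406 lowers it to ~48 Hz.
 */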
8203
8204 static void update_stream_irq_parameters(
8205         struct amdgpu_display_manager *dm,
8206         struct dm_crtc_state *new_crtc_state)
8207 {
8208         struct dc_stream_state *new_stream = new_crtc_state->stream;
8209         struct mod_vrr_params vrr_params;
8210         struct mod_freesync_config config = new_crtc_state->freesync_config;
8211         struct amdgpu_device *adev = dm->adev;
8212         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8213         unsigned long flags;
8214
8215         if (!new_stream)
8216                 return;
8217
8218         /*
8219          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8220          * For now it's sufficient to just guard against these conditions.
8221          */
8222         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8223                 return;
8224
8225         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8226         vrr_params = acrtc->dm_irq_params.vrr_params;
8227
8228         if (new_crtc_state->vrr_supported &&
8229             config.min_refresh_in_uhz &&
8230             config.max_refresh_in_uhz) {
8231                 /*
8232                  * if freesync compatible mode was set, config.state will be set
8233                  * in atomic check
8234                  */
8235                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8236                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8237                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8238                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8239                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8240                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8241                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8242                 } else {
8243                         config.state = new_crtc_state->base.vrr_enabled ?
8244                                                      VRR_STATE_ACTIVE_VARIABLE :
8245                                                      VRR_STATE_INACTIVE;
8246                 }
8247         } else {
8248                 config.state = VRR_STATE_UNSUPPORTED;
8249         }
8250
8251         mod_freesync_build_vrr_params(dm->freesync_module,
8252                                       new_stream,
8253                                       &config, &vrr_params);
8254
8255         new_crtc_state->freesync_timing_changed |=
8256                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8257                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8258
8259         new_crtc_state->freesync_config = config;
8260         /* Copy state for access from DM IRQ handler */
8261         acrtc->dm_irq_params.freesync_config = config;
8262         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8263         acrtc->dm_irq_params.vrr_params = vrr_params;
8264         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8265 }
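/*
 * Summary of the state selection above:
 *
 *	vrr_supported, min/max set, fixed-rate config	-> ACTIVE_FIXED
 *	vrr_supported, min/max set, base.vrr_enabled	-> ACTIVE_VARIABLE
 *	vrr_supported, min/max set, !base.vrr_enabled	-> INACTIVE
 *	otherwise					-> UNSUPPORTED
 */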
8266
8267 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8268                                             struct dm_crtc_state *new_state)
8269 {
8270         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8271         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8272
8273         if (!old_vrr_active && new_vrr_active) {
8274                 /* Transition VRR inactive -> active:
8275                  * While VRR is active, we must not disable the vblank irq, as a
8276                  * re-enable after a disable would compute bogus vblank/pflip
8277                  * timestamps if it happened inside the display front porch.
8278                  *
8279                  * We also need vupdate irq for the actual core vblank handling
8280                  * at end of vblank.
8281                  */
8282                 dm_set_vupdate_irq(new_state->base.crtc, true);
8283                 drm_crtc_vblank_get(new_state->base.crtc);
8284                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8285                                  __func__, new_state->base.crtc->base.id);
8286         } else if (old_vrr_active && !new_vrr_active) {
8287                 /* Transition VRR active -> inactive:
8288                  * Allow vblank irq disable again for fixed refresh rate.
8289                  */
8290                 dm_set_vupdate_irq(new_state->base.crtc, false);
8291                 drm_crtc_vblank_put(new_state->base.crtc);
8292                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8293                                  __func__, new_state->base.crtc->base.id);
8294         }
8295 }
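/*
 * The irq/reference operations above must stay balanced across a VRR
 * session; a condensed sketch of the pairing (not additional code):
 *
 *	// off -> on
 *	dm_set_vupdate_irq(crtc, true);
 *	drm_crtc_vblank_get(crtc);	// keep vblank irq enabled
 *	// ... any number of VRR flips ...
 *	// on -> off
 *	dm_set_vupdate_irq(crtc, false);
 *	drm_crtc_vblank_put(crtc);	// allow vblank irq disable again
 */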
8296
8297 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8298 {
8299         struct drm_plane *plane;
8300         struct drm_plane_state *old_plane_state;
8301         int i;
8302
8303         /*
8304          * TODO: Make this per-stream so we don't issue redundant updates for
8305          * commits with multiple streams.
8306          */
8307         for_each_old_plane_in_state(state, plane, old_plane_state, i)
8308                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8309                         handle_cursor_update(plane, old_plane_state);
8310 }
8311
8312 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8313                                     struct dc_state *dc_state,
8314                                     struct drm_device *dev,
8315                                     struct amdgpu_display_manager *dm,
8316                                     struct drm_crtc *pcrtc,
8317                                     bool wait_for_vblank)
8318 {
8319         uint32_t i;
8320         uint64_t timestamp_ns;
8321         struct drm_plane *plane;
8322         struct drm_plane_state *old_plane_state, *new_plane_state;
8323         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8324         struct drm_crtc_state *new_pcrtc_state =
8325                         drm_atomic_get_new_crtc_state(state, pcrtc);
8326         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8327         struct dm_crtc_state *dm_old_crtc_state =
8328                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8329         int planes_count = 0, vpos, hpos;
8330         long r;
8331         unsigned long flags;
8332         struct amdgpu_bo *abo;
8333         uint32_t target_vblank, last_flip_vblank;
8334         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8335         bool pflip_present = false;
8336         struct {
8337                 struct dc_surface_update surface_updates[MAX_SURFACES];
8338                 struct dc_plane_info plane_infos[MAX_SURFACES];
8339                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8340                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8341                 struct dc_stream_update stream_update;
8342         } *bundle;
8343
8344         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8345
8346         if (!bundle) {
8347                 dm_error("Failed to allocate update bundle\n");
8348                 goto cleanup;
8349         }
8350
8351         /*
8352          * Disable the cursor first if we're disabling all the planes.
8353          * It'll remain on the screen after the planes are re-enabled
8354          * if we don't.
8355          */
8356         if (acrtc_state->active_planes == 0)
8357                 amdgpu_dm_commit_cursors(state);
8358
8359         /* update planes when needed */
8360         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8361                 struct drm_crtc *crtc = new_plane_state->crtc;
8362                 struct drm_crtc_state *new_crtc_state;
8363                 struct drm_framebuffer *fb = new_plane_state->fb;
8364                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8365                 bool plane_needs_flip;
8366                 struct dc_plane_state *dc_plane;
8367                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8368
8369                 /* Cursor plane is handled after stream updates */
8370                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8371                         continue;
8372
8373                 if (!fb || !crtc || pcrtc != crtc)
8374                         continue;
8375
8376                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8377                 if (!new_crtc_state->active)
8378                         continue;
8379
8380                 dc_plane = dm_new_plane_state->dc_state;
8381
8382                 bundle->surface_updates[planes_count].surface = dc_plane;
8383                 if (new_pcrtc_state->color_mgmt_changed) {
8384                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8385                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8386                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8387                 }
8388
8389                 fill_dc_scaling_info(new_plane_state,
8390                                      &bundle->scaling_infos[planes_count]);
8391
8392                 bundle->surface_updates[planes_count].scaling_info =
8393                         &bundle->scaling_infos[planes_count];
8394
8395                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8396
8397                 pflip_present = pflip_present || plane_needs_flip;
8398
8399                 if (!plane_needs_flip) {
8400                         planes_count += 1;
8401                         continue;
8402                 }
8403
8404                 abo = gem_to_amdgpu_bo(fb->obj[0]);
8405
8406                 /*
8407                  * Wait for all fences on this FB. Do limited wait to avoid
8408                  * deadlock during GPU reset when this fence will not signal
8409                  * but we hold reservation lock for the BO.
8410                  */
8411                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8412                                                         false,
8413                                                         msecs_to_jiffies(5000));
8414                 if (unlikely(r <= 0))
8415                         DRM_ERROR("Waiting for fences timed out!\n");
8416
8417                 fill_dc_plane_info_and_addr(
8418                         dm->adev, new_plane_state,
8419                         afb->tiling_flags,
8420                         &bundle->plane_infos[planes_count],
8421                         &bundle->flip_addrs[planes_count].address,
8422                         afb->tmz_surface, false);
8423
8424                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8425                                  new_plane_state->plane->index,
8426                                  bundle->plane_infos[planes_count].dcc.enable);
8427
8428                 bundle->surface_updates[planes_count].plane_info =
8429                         &bundle->plane_infos[planes_count];
8430
8431                 /*
8432                  * Only allow immediate flips for fast updates that don't
8433                  * change FB pitch, DCC state, rotation or mirroring.
8434                  */
8435                 bundle->flip_addrs[planes_count].flip_immediate =
8436                         crtc->state->async_flip &&
8437                         acrtc_state->update_type == UPDATE_TYPE_FAST;
8438
8439                 timestamp_ns = ktime_get_ns();
8440                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8441                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8442                 bundle->surface_updates[planes_count].surface = dc_plane;
8443
8444                 if (!bundle->surface_updates[planes_count].surface) {
8445                         DRM_ERROR("No surface for CRTC: id=%d\n",
8446                                         acrtc_attach->crtc_id);
8447                         continue;
8448                 }
8449
8450                 if (plane == pcrtc->primary)
8451                         update_freesync_state_on_stream(
8452                                 dm,
8453                                 acrtc_state,
8454                                 acrtc_state->stream,
8455                                 dc_plane,
8456                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8457
8458                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8459                                  __func__,
8460                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8461                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8462
8463                 planes_count += 1;
8464
8465         }
8466
8467         if (pflip_present) {
8468                 if (!vrr_active) {
8469                         /* Use old throttling in non-vrr fixed refresh rate mode
8470                          * to keep flip scheduling based on target vblank counts
8471                          * working in a backwards compatible way, e.g., for
8472                          * clients using the GLX_OML_sync_control extension or
8473                          * DRI3/Present extension with defined target_msc.
8474                          */
8475                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8476                 }
8477                 else {
8478                         /* For variable refresh rate mode only:
8479                          * Get vblank of last completed flip to avoid > 1 vrr
8480                          * flips per video frame by use of throttling, but allow
8481                          * flip programming anywhere in the possibly large
8482                          * variable vrr vblank interval for fine-grained flip
8483                          * timing control and more opportunity to avoid stutter
8484                          * on late submission of flips.
8485                          */
8486                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8487                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8488                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8489                 }
8490
8491                 target_vblank = last_flip_vblank + wait_for_vblank;
8492
8493                 /*
8494                  * Wait until we're out of the vertical blank period before the one
8495                  * targeted by the flip
8496                  */
8497                 while ((acrtc_attach->enabled &&
8498                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8499                                                             0, &vpos, &hpos, NULL,
8500                                                             NULL, &pcrtc->hwmode)
8501                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8502                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8503                         (int)(target_vblank -
8504                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8505                         usleep_range(1000, 1100);
8506                 }
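                /*
                 * Worked example of the throttle above (illustrative
                 * numbers): with last_flip_vblank = 1000 and
                 * wait_for_vblank = true, target_vblank = 1001, so we
                 * poll in ~1 ms steps until the vblank counter reaches
                 * 1001 and the scanout position has left the vblank
                 * region of the previous frame.
                 */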
8507
8508                 /**
8509                  * Prepare the flip event for the pageflip interrupt to handle.
8510                  *
8511                  * This only works in the case where we've already turned on the
8512                  * appropriate hardware blocks (e.g. HUBP), so in the transition case
8513                  * from 0 -> n planes we have to skip a hardware generated event
8514                  * and rely on sending it from software.
8515                  */
8516                 if (acrtc_attach->base.state->event &&
8517                     acrtc_state->active_planes > 0) {
8518                         drm_crtc_vblank_get(pcrtc);
8519
8520                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8521
8522                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8523                         prepare_flip_isr(acrtc_attach);
8524
8525                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8526                 }
8527
8528                 if (acrtc_state->stream) {
8529                         if (acrtc_state->freesync_vrr_info_changed)
8530                                 bundle->stream_update.vrr_infopacket =
8531                                         &acrtc_state->stream->vrr_infopacket;
8532                 }
8533         }
8534
8535         /* Update the planes if changed or disable if we don't have any. */
8536         if ((planes_count || acrtc_state->active_planes == 0) &&
8537                 acrtc_state->stream) {
8538                 bundle->stream_update.stream = acrtc_state->stream;
8539                 if (new_pcrtc_state->mode_changed) {
8540                         bundle->stream_update.src = acrtc_state->stream->src;
8541                         bundle->stream_update.dst = acrtc_state->stream->dst;
8542                 }
8543
8544                 if (new_pcrtc_state->color_mgmt_changed) {
8545                         /*
8546                          * TODO: This isn't fully correct since we've actually
8547                          * already modified the stream in place.
8548                          */
8549                         bundle->stream_update.gamut_remap =
8550                                 &acrtc_state->stream->gamut_remap_matrix;
8551                         bundle->stream_update.output_csc_transform =
8552                                 &acrtc_state->stream->csc_color_matrix;
8553                         bundle->stream_update.out_transfer_func =
8554                                 acrtc_state->stream->out_transfer_func;
8555                 }
8556
8557                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8558                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8559                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
8560
8561                 /*
8562                  * If FreeSync state on the stream has changed then we need to
8563                  * re-adjust the min/max bounds now that DC doesn't handle this
8564                  * as part of commit.
8565                  */
8566                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8567                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8568                         dc_stream_adjust_vmin_vmax(
8569                                 dm->dc, acrtc_state->stream,
8570                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8571                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8572                 }
8573                 mutex_lock(&dm->dc_lock);
8574                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8575                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
8576                         amdgpu_dm_psr_disable(acrtc_state->stream);
8577
8578                 dc_commit_updates_for_stream(dm->dc,
8579                                                      bundle->surface_updates,
8580                                                      planes_count,
8581                                                      acrtc_state->stream,
8582                                                      &bundle->stream_update,
8583                                                      dc_state);
8584
8585                 /**
8586                  * Enable or disable the interrupts on the backend.
8587                  *
8588                  * Most pipes are put into power gating when unused.
8589                  *
8590                  * When power gating is enabled on a pipe we lose the
8591                  * interrupt enablement state when power gating is disabled.
8592                  *
8593                  * So we need to update the IRQ control state in hardware
8594                  * whenever the pipe turns on (since it could be previously
8595                  * power gated) or off (since some pipes can't be power gated
8596                  * on some ASICs).
8597                  */
8598                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8599                         dm_update_pflip_irq_state(drm_to_adev(dev),
8600                                                   acrtc_attach);
8601
8602                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8603                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8604                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8605                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8606                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8607                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8608                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8609                         amdgpu_dm_psr_enable(acrtc_state->stream);
8610                 }
8611
8612                 mutex_unlock(&dm->dc_lock);
8613         }
8614
8615         /*
8616          * Update cursor state *after* programming all the planes.
8617          * This avoids redundant programming in the case where we're going
8618          * to be disabling a single plane - those pipes are being disabled.
8619          */
8620         if (acrtc_state->active_planes)
8621                 amdgpu_dm_commit_cursors(state);
8622
8623 cleanup:
8624         kfree(bundle);
8625 }
8626
8627 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8628                                    struct drm_atomic_state *state)
8629 {
8630         struct amdgpu_device *adev = drm_to_adev(dev);
8631         struct amdgpu_dm_connector *aconnector;
8632         struct drm_connector *connector;
8633         struct drm_connector_state *old_con_state, *new_con_state;
8634         struct drm_crtc_state *new_crtc_state;
8635         struct dm_crtc_state *new_dm_crtc_state;
8636         const struct dc_stream_status *status;
8637         int i, inst;
8638
8639         /* Notify device removals. */
8640         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8641                 if (old_con_state->crtc != new_con_state->crtc) {
8642                         /* CRTC changes require notification. */
8643                         goto notify;
8644                 }
8645
8646                 if (!new_con_state->crtc)
8647                         continue;
8648
8649                 new_crtc_state = drm_atomic_get_new_crtc_state(
8650                         state, new_con_state->crtc);
8651
8652                 if (!new_crtc_state)
8653                         continue;
8654
8655                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8656                         continue;
8657
8658         notify:
8659                 aconnector = to_amdgpu_dm_connector(connector);
8660
8661                 mutex_lock(&adev->dm.audio_lock);
8662                 inst = aconnector->audio_inst;
8663                 aconnector->audio_inst = -1;
8664                 mutex_unlock(&adev->dm.audio_lock);
8665
8666                 amdgpu_dm_audio_eld_notify(adev, inst);
8667         }
8668
8669         /* Notify audio device additions. */
8670         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8671                 if (!new_con_state->crtc)
8672                         continue;
8673
8674                 new_crtc_state = drm_atomic_get_new_crtc_state(
8675                         state, new_con_state->crtc);
8676
8677                 if (!new_crtc_state)
8678                         continue;
8679
8680                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8681                         continue;
8682
8683                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8684                 if (!new_dm_crtc_state->stream)
8685                         continue;
8686
8687                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8688                 if (!status)
8689                         continue;
8690
8691                 aconnector = to_amdgpu_dm_connector(connector);
8692
8693                 mutex_lock(&adev->dm.audio_lock);
8694                 inst = status->audio_inst;
8695                 aconnector->audio_inst = inst;
8696                 mutex_unlock(&adev->dm.audio_lock);
8697
8698                 amdgpu_dm_audio_eld_notify(adev, inst);
8699         }
8700 }
8701
8702 /**
8703  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8704  * @crtc_state: the DRM CRTC state
8705  * @stream_state: the DC stream state.
8706  *
8707  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8708  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8709  */
8710 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8711                                                 struct dc_stream_state *stream_state)
8712 {
8713         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8714 }
8715
8716 /**
8717  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8718  * @state: The atomic state to commit
8719  *
8720  * This will tell DC to commit the constructed DC state from atomic_check,
8721  * programming the hardware. Any failure here implies a hardware failure, since
8722  * atomic check should have filtered anything non-kosher.
8723  */
8724 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8725 {
8726         struct drm_device *dev = state->dev;
8727         struct amdgpu_device *adev = drm_to_adev(dev);
8728         struct amdgpu_display_manager *dm = &adev->dm;
8729         struct dm_atomic_state *dm_state;
8730         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8731         uint32_t i, j;
8732         struct drm_crtc *crtc;
8733         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8734         unsigned long flags;
8735         bool wait_for_vblank = true;
8736         struct drm_connector *connector;
8737         struct drm_connector_state *old_con_state, *new_con_state;
8738         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8739         int crtc_disable_count = 0;
8740         bool mode_set_reset_required = false;
8741
8742         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8743
8744         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8745
8746         dm_state = dm_atomic_get_new_state(state);
8747         if (dm_state && dm_state->context) {
8748                 dc_state = dm_state->context;
8749         } else {
8750                 /* No state changes, retain current state. */
8751                 dc_state_temp = dc_create_state(dm->dc);
8752                 ASSERT(dc_state_temp);
8753                 dc_state = dc_state_temp;
8754                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8755         }
8756
8757         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8758                                        new_crtc_state, i) {
8759                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8760
8761                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8762
8763                 if (old_crtc_state->active &&
8764                     (!new_crtc_state->active ||
8765                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8766                         manage_dm_interrupts(adev, acrtc, false);
8767                         dc_stream_release(dm_old_crtc_state->stream);
8768                 }
8769         }
8770
8771         drm_atomic_helper_calc_timestamping_constants(state);
8772
8773         /* update changed items */
8774         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8775                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8776
8777                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8778                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8779
8780                 DRM_DEBUG_ATOMIC(
8781                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8782                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8783                         "connectors_changed:%d\n",
8784                         acrtc->crtc_id,
8785                         new_crtc_state->enable,
8786                         new_crtc_state->active,
8787                         new_crtc_state->planes_changed,
8788                         new_crtc_state->mode_changed,
8789                         new_crtc_state->active_changed,
8790                         new_crtc_state->connectors_changed);
8791
8792                 /* Disable cursor if disabling crtc */
8793                 if (old_crtc_state->active && !new_crtc_state->active) {
8794                         struct dc_cursor_position position;
8795
8796                         memset(&position, 0, sizeof(position));
8797                         mutex_lock(&dm->dc_lock);
8798                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8799                         mutex_unlock(&dm->dc_lock);
8800                 }
8801
8802                 /* Copy all transient state flags into dc state */
8803                 if (dm_new_crtc_state->stream) {
8804                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8805                                                             dm_new_crtc_state->stream);
8806                 }
8807
8808                 /* Handle the headless hotplug case, updating new_state and
8809                  * aconnector as needed.
8810                  */
8811
8812                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8813
8814                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8815
8816                         if (!dm_new_crtc_state->stream) {
8817                                 /*
8818                                  * This could happen because of issues with
8819                                  * userspace notification delivery.
8820                                  * In this case userspace tries to set a mode on
8821                                  * a display that is in fact disconnected, so
8822                                  * dc_sink is NULL on the aconnector.
8823                                  * We expect a mode reset to come soon.
8824                                  *
8825                                  * This can also happen when an unplug occurs
8826                                  * while the resume sequence is still in progress.
8827                                  *
8828                                  * In this case, we want to pretend we still
8829                                  * have a sink to keep the pipe running so that
8830                                  * hw state is consistent with the sw state.
8831                                  */
8832                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8833                                                 __func__, acrtc->base.base.id);
8834                                 continue;
8835                         }
8836
8837                         if (dm_old_crtc_state->stream)
8838                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8839
8840                         pm_runtime_get_noresume(dev->dev);
8841
8842                         acrtc->enabled = true;
8843                         acrtc->hw_mode = new_crtc_state->mode;
8844                         crtc->hwmode = new_crtc_state->mode;
8845                         mode_set_reset_required = true;
8846                 } else if (modereset_required(new_crtc_state)) {
8847                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8848                         /* i.e. reset mode */
8849                         if (dm_old_crtc_state->stream)
8850                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8851
8852                         mode_set_reset_required = true;
8853                 }
8854         } /* for_each_crtc_in_state() */
8855
8856         if (dc_state) {
8857                 /* if there is a mode set or reset, disable eDP PSR */
8858                 if (mode_set_reset_required)
8859                         amdgpu_dm_psr_disable_all(dm);
8860
8861                 dm_enable_per_frame_crtc_master_sync(dc_state);
8862                 mutex_lock(&dm->dc_lock);
8863                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8864 #if defined(CONFIG_DRM_AMD_DC_DCN)
8865                 /* Allow idle optimizations when the vblank count is 0 (display off) */
8866                 if (dm->active_vblank_irq_count == 0)
8867                         dc_allow_idle_optimizations(dm->dc, true);
8868 #endif
8869                 mutex_unlock(&dm->dc_lock);
8870         }
8871
8872         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8873                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8874
8875                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8876
8877                 if (dm_new_crtc_state->stream != NULL) {
8878                         const struct dc_stream_status *status =
8879                                         dc_stream_get_status(dm_new_crtc_state->stream);
8880
8881                         if (!status)
8882                                 status = dc_stream_get_status_from_state(dc_state,
8883                                                                          dm_new_crtc_state->stream);
8884                         if (!status)
8885                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8886                         else
8887                                 acrtc->otg_inst = status->primary_otg_inst;
8888                 }
8889         }
8890 #ifdef CONFIG_DRM_AMD_DC_HDCP
8891         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8892                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8893                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8894                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8895
8896                 new_crtc_state = NULL;
8897
8898                 if (acrtc)
8899                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8900
8901                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8902
8903                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8904                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8905                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8906                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8907                         dm_new_con_state->update_hdcp = true;
8908                         continue;
8909                 }
8910
8911                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8912                         hdcp_update_display(
8913                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8914                                 new_con_state->hdcp_content_type,
8915                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8916         }
8917 #endif
8918
8919         /* Handle connector state changes */
8920         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8921                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8922                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8923                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8924                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8925                 struct dc_stream_update stream_update;
8926                 struct dc_info_packet hdr_packet;
8927                 struct dc_stream_status *status = NULL;
8928                 bool abm_changed, hdr_changed, scaling_changed;
8929
8930                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8931                 memset(&stream_update, 0, sizeof(stream_update));
8932
8933                 if (acrtc) {
8934                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8935                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8936                 }
8937
8938                 /* Skip any modesets/resets */
8939                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8940                         continue;
8941
8942                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8943                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8944
8945                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8946                                                              dm_old_con_state);
8947
8948                 abm_changed = dm_new_crtc_state->abm_level !=
8949                               dm_old_crtc_state->abm_level;
8950
8951                 hdr_changed =
8952                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
8953
8954                 if (!scaling_changed && !abm_changed && !hdr_changed)
8955                         continue;
8956
8957                 stream_update.stream = dm_new_crtc_state->stream;
8958                 if (scaling_changed) {
8959                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8960                                         dm_new_con_state, dm_new_crtc_state->stream);
8961
8962                         stream_update.src = dm_new_crtc_state->stream->src;
8963                         stream_update.dst = dm_new_crtc_state->stream->dst;
8964                 }
8965
8966                 if (abm_changed) {
8967                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8968
8969                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8970                 }
8971
8972                 if (hdr_changed) {
8973                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8974                         stream_update.hdr_static_metadata = &hdr_packet;
8975                 }
8976
8977                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8978                 WARN_ON(!status);
8979                 WARN_ON(!status->plane_count);
8980
8981                 /*
8982                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8983                  * Here we create an empty update on each plane.
8984                  * To fix this, DC should permit updating only stream properties.
8985                  */
8986                 for (j = 0; j < status->plane_count; j++)
8987                         dummy_updates[j].surface = status->plane_states[0];
8988
8989
8990                 mutex_lock(&dm->dc_lock);
8991                 dc_commit_updates_for_stream(dm->dc,
8992                                                      dummy_updates,
8993                                                      status->plane_count,
8994                                                      dm_new_crtc_state->stream,
8995                                                      &stream_update,
8996                                                      dc_state);
8997                 mutex_unlock(&dm->dc_lock);
8998         }
8999
9000         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9001         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9002                                       new_crtc_state, i) {
9003                 if (old_crtc_state->active && !new_crtc_state->active)
9004                         crtc_disable_count++;
9005
9006                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9007                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9008
9009                 /* For freesync config update on crtc state and params for irq */
9010                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9011
9012                 /* Handle vrr on->off / off->on transitions */
9013                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9014                                                 dm_new_crtc_state);
9015         }
9016
9017         /**
9018          * Enable interrupts for CRTCs that are newly enabled or went through
9019          * a modeset. This is intentionally deferred until after the front-end
9020          * state has been modified, so that the OTG is on and the IRQ handlers
9021          * do not access stale or invalid state.
9022          */
9023         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9024                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9025 #ifdef CONFIG_DEBUG_FS
9026                 bool configure_crc = false;
9027                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9028 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9029                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9030 #endif
9031                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9032                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9033                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9034 #endif
9035                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9036
9037                 if (new_crtc_state->active &&
9038                     (!old_crtc_state->active ||
9039                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9040                         dc_stream_retain(dm_new_crtc_state->stream);
9041                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9042                         manage_dm_interrupts(adev, acrtc, true);
9043
9044 #ifdef CONFIG_DEBUG_FS
9045                         /**
9046                          * Frontend may have changed so reapply the CRC capture
9047                          * settings for the stream.
9048                          */
9049                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9050
9051                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9052                                 configure_crc = true;
9053 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9054                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9055                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9056                                         acrtc->dm_irq_params.crc_window.update_win = true;
9057                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9058                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9059                                         crc_rd_wrk->crtc = crtc;
9060                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9061                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9062                                 }
9063 #endif
9064                         }
9065
9066                         if (configure_crc)
9067                                 if (amdgpu_dm_crtc_configure_crc_source(
9068                                         crtc, dm_new_crtc_state, cur_crc_src))
9069                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9070 #endif
9071                 }
9072         }
9073
9074         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9075                 if (new_crtc_state->async_flip)
9076                         wait_for_vblank = false;
9077
9078         /* update planes when needed, per crtc */
9079         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9080                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9081
9082                 if (dm_new_crtc_state->stream)
9083                         amdgpu_dm_commit_planes(state, dc_state, dev,
9084                                                 dm, crtc, wait_for_vblank);
9085         }
9086
9087         /* Update audio instances for each connector. */
9088         amdgpu_dm_commit_audio(dev, state);
9089
9090 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
9091         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9092         /* restore the backlight level */
9093         if (dm->backlight_dev)
9094                 amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
9095 #endif
9096         /*
9097          * send vblank event on all events not handled in flip and
9098          * mark consumed event for drm_atomic_helper_commit_hw_done
9099          */
9100         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9101         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9102
9103                 if (new_crtc_state->event)
9104                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9105
9106                 new_crtc_state->event = NULL;
9107         }
9108         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9109
9110         /* Signal HW programming completion */
9111         drm_atomic_helper_commit_hw_done(state);
9112
9113         if (wait_for_vblank)
9114                 drm_atomic_helper_wait_for_flip_done(dev, state);
9115
9116         drm_atomic_helper_cleanup_planes(dev, state);
9117
9118         /* return the stolen vga memory back to VRAM */
9119         if (!adev->mman.keep_stolen_vga_memory)
9120                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9121         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9122
9123         /*
9124          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9125          * so we can put the GPU into runtime suspend if we're not driving any
9126          * displays anymore
9127          */
9128         for (i = 0; i < crtc_disable_count; i++)
9129                 pm_runtime_put_autosuspend(dev->dev);
9130         pm_runtime_mark_last_busy(dev->dev);
9131
9132         if (dc_state_temp)
9133                 dc_release_state(dc_state_temp);
9134 }
9135
9136
9137 static int dm_force_atomic_commit(struct drm_connector *connector)
9138 {
9139         int ret = 0;
9140         struct drm_device *ddev = connector->dev;
9141         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9142         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9143         struct drm_plane *plane = disconnected_acrtc->base.primary;
9144         struct drm_connector_state *conn_state;
9145         struct drm_crtc_state *crtc_state;
9146         struct drm_plane_state *plane_state;
9147
9148         if (!state)
9149                 return -ENOMEM;
9150
9151         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9152
9153         /* Construct an atomic state to restore previous display setting */
9154
9155         /*
9156          * Attach connectors to drm_atomic_state
9157          */
9158         conn_state = drm_atomic_get_connector_state(state, connector);
9159
9160         ret = PTR_ERR_OR_ZERO(conn_state);
9161         if (ret)
9162                 goto out;
9163
9164         /* Attach crtc to drm_atomic_state*/
9165         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9166
9167         ret = PTR_ERR_OR_ZERO(crtc_state);
9168         if (ret)
9169                 goto out;
9170
9171         /* force a restore */
9172         crtc_state->mode_changed = true;
9173
9174         /* Attach plane to drm_atomic_state */
9175         plane_state = drm_atomic_get_plane_state(state, plane);
9176
9177         ret = PTR_ERR_OR_ZERO(plane_state);
9178         if (ret)
9179                 goto out;
9180
9181         /* Call commit internally with the state we just constructed */
9182         ret = drm_atomic_commit(state);
9183
9184 out:
9185         drm_atomic_state_put(state);
9186         if (ret)
9187                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9188
9189         return ret;
9190 }
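/*
 * The function above is essentially the canonical "build a minimal
 * atomic state and commit it" pattern. A condensed sketch of the same
 * sequence, with error handling elided (illustrative only):
 *
 *	state = drm_atomic_state_alloc(dev);
 *	state->acquire_ctx = dev->mode_config.acquire_ctx;
 *	conn_state  = drm_atomic_get_connector_state(state, connector);
 *	crtc_state  = drm_atomic_get_crtc_state(state, crtc);
 *	crtc_state->mode_changed = true;	// force a full modeset
 *	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
 *	ret = drm_atomic_commit(state);
 *	drm_atomic_state_put(state);
 */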
9191
9192 /*
9193  * This function handles all cases when a set mode does not come upon hotplug.
9194  * This includes when a display is unplugged and then plugged back into the
9195  * same port, and when running without usermode desktop manager support.
9196  */
9197 void dm_restore_drm_connector_state(struct drm_device *dev,
9198                                     struct drm_connector *connector)
9199 {
9200         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9201         struct amdgpu_crtc *disconnected_acrtc;
9202         struct dm_crtc_state *acrtc_state;
9203
9204         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9205                 return;
9206
9207         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9208         if (!disconnected_acrtc)
9209                 return;
9210
9211         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9212         if (!acrtc_state->stream)
9213                 return;
9214
9215         /*
9216          * If the previous sink is not released and differs from the current one,
9217          * we deduce we are in a state where we cannot rely on a usermode call
9218          * to turn on the display, so we do it here.
9219          */
9220         if (acrtc_state->stream->sink != aconnector->dc_sink)
9221                 dm_force_atomic_commit(&aconnector->base);
9222 }
9223
9224 /*
9225  * Grabs all modesetting locks to serialize against any blocking commits,
9226  * and waits for completion of all non-blocking commits.
9227  */
9228 static int do_aquire_global_lock(struct drm_device *dev,
9229                                  struct drm_atomic_state *state)
9230 {
9231         struct drm_crtc *crtc;
9232         struct drm_crtc_commit *commit;
9233         long ret;
9234
9235         /*
9236          * Adding all modeset locks to acquire_ctx will
9237          * ensure that when the framework releases it, the
9238          * extra locks we are taking here will get released too.
9239          */
9240         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9241         if (ret)
9242                 return ret;
9243
9244         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9245                 spin_lock(&crtc->commit_lock);
9246                 commit = list_first_entry_or_null(&crtc->commit_list,
9247                                 struct drm_crtc_commit, commit_entry);
9248                 if (commit)
9249                         drm_crtc_commit_get(commit);
9250                 spin_unlock(&crtc->commit_lock);
9251
9252                 if (!commit)
9253                         continue;
9254
9255                 /*
9256                  * Make sure all pending HW programming completed and
9257                  * page flips done
9258                  */
9259                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9260
9261                 if (ret > 0)
9262                         ret = wait_for_completion_interruptible_timeout(
9263                                         &commit->flip_done, 10*HZ);
9264
9265                 if (ret == 0)
9266                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9267                                   "timed out\n", crtc->base.id, crtc->name);
9268
9269                 drm_crtc_commit_put(commit);
9270         }
9271
9272         return ret < 0 ? ret : 0;
9273 }
9274
9275 static void get_freesync_config_for_crtc(
9276         struct dm_crtc_state *new_crtc_state,
9277         struct dm_connector_state *new_con_state)
9278 {
9279         struct mod_freesync_config config = {0};
9280         struct amdgpu_dm_connector *aconnector =
9281                         to_amdgpu_dm_connector(new_con_state->base.connector);
9282         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9283         int vrefresh = drm_mode_vrefresh(mode);
9284         bool fs_vid_mode = false;
9285
9286         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9287                                         vrefresh >= aconnector->min_vfreq &&
9288                                         vrefresh <= aconnector->max_vfreq;
9289
9290         if (new_crtc_state->vrr_supported) {
9291                 new_crtc_state->stream->ignore_msa_timing_param = true;
9292                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9293
9294                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9295                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9296                 config.vsif_supported = true;
9297                 config.btr = true;
9298
9299                 if (fs_vid_mode) {
9300                         config.state = VRR_STATE_ACTIVE_FIXED;
9301                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9302                         goto out;
9303                 } else if (new_crtc_state->base.vrr_enabled) {
9304                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9305                 } else {
9306                         config.state = VRR_STATE_INACTIVE;
9307                 }
9308         }
9309 out:
9310         new_crtc_state->freesync_config = config;
9311 }
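
/*
 * Worked example for the config built above, assuming a panel whose EDID
 * reports a 48-144 Hz FreeSync range: min_vfreq = 48 and max_vfreq = 144
 * yield min_refresh_in_uhz = 48000000 and max_refresh_in_uhz = 144000000,
 * i.e. the freesync module API takes refresh rates in micro-Hertz.
 */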
9312
9313 static void reset_freesync_config_for_crtc(
9314         struct dm_crtc_state *new_crtc_state)
9315 {
9316         new_crtc_state->vrr_supported = false;
9317
9318         memset(&new_crtc_state->vrr_infopacket, 0,
9319                sizeof(new_crtc_state->vrr_infopacket));
9320 }
9321
9322 static bool
9323 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9324                                  struct drm_crtc_state *new_crtc_state)
9325 {
9326         struct drm_display_mode old_mode, new_mode;
9327
9328         if (!old_crtc_state || !new_crtc_state)
9329                 return false;
9330
9331         old_mode = old_crtc_state->mode;
9332         new_mode = new_crtc_state->mode;
9333
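        /*
         * A freesync video mode switch stretches only the vertical front
         * porch: vtotal, vsync_start and vsync_end move while the sync
         * pulse width (vsync_end - vsync_start) and every horizontal
         * timing parameter stay identical, which is exactly what the
         * mixed '==' / '!=' checks below accept.
         */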
9334         if (old_mode.clock       == new_mode.clock &&
9335             old_mode.hdisplay    == new_mode.hdisplay &&
9336             old_mode.vdisplay    == new_mode.vdisplay &&
9337             old_mode.htotal      == new_mode.htotal &&
9338             old_mode.vtotal      != new_mode.vtotal &&
9339             old_mode.hsync_start == new_mode.hsync_start &&
9340             old_mode.vsync_start != new_mode.vsync_start &&
9341             old_mode.hsync_end   == new_mode.hsync_end &&
9342             old_mode.vsync_end   != new_mode.vsync_end &&
9343             old_mode.hskew       == new_mode.hskew &&
9344             old_mode.vscan       == new_mode.vscan &&
9345             (old_mode.vsync_end - old_mode.vsync_start) ==
9346             (new_mode.vsync_end - new_mode.vsync_start))
9347                 return true;
9348
9349         return false;
9350 }
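
/*
 * Worked example for is_timing_unchanged_for_freesync(), using CEA 1080p60
 * (clock = 148500 kHz, htotal = 2200, vtotal = 1125, vsync_start = 1084,
 * vsync_end = 1089): stretching the front porch to hold the image at
 * roughly 48 Hz gives vtotal = 1406, vsync_start = 1365 and
 * vsync_end = 1370. Only the three vertical blanking values move and the
 * sync pulse width (5 lines) is preserved, so the function returns true
 * and the switch can skip a full modeset.
 */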
9351
9352 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9353         uint64_t num, den, res;
9354         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9355
9356         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9357
9358         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9359         den = (unsigned long long)new_crtc_state->mode.htotal *
9360               (unsigned long long)new_crtc_state->mode.vtotal;
9361
9362         res = div_u64(num, den);
9363         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9364 }
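
/*
 * Worked example for the fixed refresh math above: mode.clock is in kHz
 * and fixed_refresh_in_uhz is in micro-Hertz, so CEA 1080p60
 * (clock = 148500, htotal = 2200, vtotal = 1125) gives
 *
 *   148500 * 1000 * 1000000 / (2200 * 1125) = 60000000 uHz = 60 Hz.
 *
 * A minimal standalone sketch of the same calculation; the example_*
 * name is illustrative and not part of the driver:
 */
static inline uint64_t example_refresh_in_uhz(unsigned int clock_khz,
                                              unsigned int htotal,
                                              unsigned int vtotal)
{
        /* kHz -> Hz -> uHz, divided by the pixel count of one frame */
        return div_u64((uint64_t)clock_khz * 1000 * 1000000,
                       htotal * vtotal);
}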
9365
9366 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9367                                 struct drm_atomic_state *state,
9368                                 struct drm_crtc *crtc,
9369                                 struct drm_crtc_state *old_crtc_state,
9370                                 struct drm_crtc_state *new_crtc_state,
9371                                 bool enable,
9372                                 bool *lock_and_validation_needed)
9373 {
9374         struct dm_atomic_state *dm_state = NULL;
9375         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9376         struct dc_stream_state *new_stream;
9377         int ret = 0;
9378
9379         /*
9380          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9381          * update changed items
9382          */
9383         struct amdgpu_crtc *acrtc = NULL;
9384         struct amdgpu_dm_connector *aconnector = NULL;
9385         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9386         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9387
9388         new_stream = NULL;
9389
9390         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9391         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9392         acrtc = to_amdgpu_crtc(crtc);
9393         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9394
9395         /* TODO This hack should go away */
9396         if (aconnector && enable) {
9397                 /* Make sure fake sink is created in plug-in scenario */
9398                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9399                                                             &aconnector->base);
9400                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9401                                                             &aconnector->base);
9402
9403                 if (IS_ERR(drm_new_conn_state)) {
9404                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9405                         goto fail;
9406                 }
9407
9408                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9409                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9410
9411                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9412                         goto skip_modeset;
9413
9414                 new_stream = create_validate_stream_for_sink(aconnector,
9415                                                              &new_crtc_state->mode,
9416                                                              dm_new_conn_state,
9417                                                              dm_old_crtc_state->stream);
9418
9419                 /*
9420                  * We can have no stream on ACTION_SET if a display
9421                  * was disconnected during S3; in this case it is not an
9422                  * error, and the OS will be updated after detection and
9423                  * will do the right thing on the next atomic commit.
9424                  */
9425
9426                 if (!new_stream) {
9427                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9428                                         __func__, acrtc->base.base.id);
9429                         ret = -ENOMEM;
9430                         goto fail;
9431                 }
9432
9433                 /*
9434                  * TODO: Check VSDB bits to decide whether this should
9435                  * be enabled or not.
9436                  */
9437                 new_stream->triggered_crtc_reset.enabled =
9438                         dm->force_timing_sync;
9439
9440                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9441
9442                 ret = fill_hdr_info_packet(drm_new_conn_state,
9443                                            &new_stream->hdr_static_metadata);
9444                 if (ret)
9445                         goto fail;
9446
9447                 /*
9448                  * If we already removed the old stream from the context
9449                  * (and set the new stream to NULL) then we can't reuse
9450                  * the old stream even if the stream and scaling are unchanged.
9451                  * We'll hit the BUG_ON and black screen.
9452                  *
9453                  * TODO: Refactor this function to allow this check to work
9454                  * in all conditions.
9455                  */
9456                 if (amdgpu_freesync_vid_mode &&
9457                     dm_new_crtc_state->stream &&
9458                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9459                         goto skip_modeset;
9460
9461                 if (dm_new_crtc_state->stream &&
9462                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9463                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9464                         new_crtc_state->mode_changed = false;
9465                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9466                                          new_crtc_state->mode_changed);
9467                 }
9468         }
9469
9470         /* mode_changed flag may get updated above, need to check again */
9471         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9472                 goto skip_modeset;
9473
9474         DRM_DEBUG_ATOMIC(
9475                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9476                 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9477                 "connectors_changed:%d\n",
9478                 acrtc->crtc_id,
9479                 new_crtc_state->enable,
9480                 new_crtc_state->active,
9481                 new_crtc_state->planes_changed,
9482                 new_crtc_state->mode_changed,
9483                 new_crtc_state->active_changed,
9484                 new_crtc_state->connectors_changed);
9485
9486         /* Remove stream for any changed/disabled CRTC */
9487         if (!enable) {
9488
9489                 if (!dm_old_crtc_state->stream)
9490                         goto skip_modeset;
9491
9492                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9493                     is_timing_unchanged_for_freesync(new_crtc_state,
9494                                                      old_crtc_state)) {
9495                         new_crtc_state->mode_changed = false;
9496                         DRM_DEBUG_DRIVER("Mode change not required for front porch change, setting mode_changed to %d\n",
9497                                          new_crtc_state->mode_changed);
9500
9501                         set_freesync_fixed_config(dm_new_crtc_state);
9502
9503                         goto skip_modeset;
9504                 } else if (amdgpu_freesync_vid_mode && aconnector &&
9505                            is_freesync_video_mode(&new_crtc_state->mode,
9506                                                   aconnector)) {
9507                         set_freesync_fixed_config(dm_new_crtc_state);
9508                 }
9509
9510                 ret = dm_atomic_get_state(state, &dm_state);
9511                 if (ret)
9512                         goto fail;
9513
9514                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9515                                 crtc->base.id);
9516
9517                 /* i.e. reset mode */
9518                 if (dc_remove_stream_from_ctx(
9519                                 dm->dc,
9520                                 dm_state->context,
9521                                 dm_old_crtc_state->stream) != DC_OK) {
9522                         ret = -EINVAL;
9523                         goto fail;
9524                 }
9525
9526                 dc_stream_release(dm_old_crtc_state->stream);
9527                 dm_new_crtc_state->stream = NULL;
9528
9529                 reset_freesync_config_for_crtc(dm_new_crtc_state);
9530
9531                 *lock_and_validation_needed = true;
9532
9533         } else { /* Add stream for any updated/enabled CRTC */
9534                 /*
9535                  * Quick fix to prevent a NULL pointer dereference on new_stream when
9536                  * newly added MST connectors are not found in the existing crtc_state
9537                  * in chained (daisy-chain) mode. TODO: dig out the root cause of this.
9538                  */
9539                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9540                         goto skip_modeset;
9541
9542                 if (modereset_required(new_crtc_state))
9543                         goto skip_modeset;
9544
9545                 if (modeset_required(new_crtc_state, new_stream,
9546                                      dm_old_crtc_state->stream)) {
9547
9548                         WARN_ON(dm_new_crtc_state->stream);
9549
9550                         ret = dm_atomic_get_state(state, &dm_state);
9551                         if (ret)
9552                                 goto fail;
9553
9554                         dm_new_crtc_state->stream = new_stream;
9555
9556                         dc_stream_retain(new_stream);
9557
9558                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9559                                          crtc->base.id);
9560
9561                         if (dc_add_stream_to_ctx(
9562                                         dm->dc,
9563                                         dm_state->context,
9564                                         dm_new_crtc_state->stream) != DC_OK) {
9565                                 ret = -EINVAL;
9566                                 goto fail;
9567                         }
9568
9569                         *lock_and_validation_needed = true;
9570                 }
9571         }
9572
9573 skip_modeset:
9574         /* Release extra reference */
9575         if (new_stream)
9576                 dc_stream_release(new_stream);
9577
9578         /*
9579          * We want to do dc stream updates that do not require a
9580          * full modeset below.
9581          */
9582         if (!(enable && aconnector && new_crtc_state->active))
9583                 return 0;
9584         /*
9585          * Given the above conditions, the dc stream state cannot be NULL because:
9586          * 1. We're in the process of enabling the CRTC (its stream has just
9587          *    been added to the dc context, or is already on the context),
9588          * 2. it has a valid connector attached, and
9589          * 3. it is currently active and enabled.
9590          * => The dc stream state currently exists.
9591          */
9592         BUG_ON(dm_new_crtc_state->stream == NULL);
9593
9594         /* Scaling or underscan settings */
9595         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9596                 update_stream_scaling_settings(
9597                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9598
9599         /* ABM settings */
9600         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9601
9602         /*
9603          * Color management settings. We also update color properties
9604          * when a modeset is needed, to ensure it gets reprogrammed.
9605          */
9606         if (dm_new_crtc_state->base.color_mgmt_changed ||
9607             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9608                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9609                 if (ret)
9610                         goto fail;
9611         }
9612
9613         /* Update Freesync settings. */
9614         get_freesync_config_for_crtc(dm_new_crtc_state,
9615                                      dm_new_conn_state);
9616
9617         return ret;
9618
9619 fail:
9620         if (new_stream)
9621                 dc_stream_release(new_stream);
9622         return ret;
9623 }
9624
9625 static bool should_reset_plane(struct drm_atomic_state *state,
9626                                struct drm_plane *plane,
9627                                struct drm_plane_state *old_plane_state,
9628                                struct drm_plane_state *new_plane_state)
9629 {
9630         struct drm_plane *other;
9631         struct drm_plane_state *old_other_state, *new_other_state;
9632         struct drm_crtc_state *new_crtc_state;
9633         int i;
9634
9635         /*
9636          * TODO: Remove this hack once the checks below are sufficient
9637          * to determine when we need to reset all the planes on
9638          * the stream.
9639          */
9640         if (state->allow_modeset)
9641                 return true;
9642
9643         /* Exit early if we know that we're adding or removing the plane. */
9644         if (old_plane_state->crtc != new_plane_state->crtc)
9645                 return true;
9646
9647         /* old crtc == new_crtc == NULL, plane not in context. */
9648         if (!new_plane_state->crtc)
9649                 return false;
9650
9651         new_crtc_state =
9652                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9653
9654         if (!new_crtc_state)
9655                 return true;
9656
9657         /* CRTC Degamma changes currently require us to recreate planes. */
9658         if (new_crtc_state->color_mgmt_changed)
9659                 return true;
9660
9661         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9662                 return true;
9663
9664         /*
9665          * If there are any new primary or overlay planes being added or
9666          * removed then the z-order can potentially change. To ensure
9667          * correct z-order and pipe acquisition the current DC architecture
9668          * requires us to remove and recreate all existing planes.
9669          *
9670          * TODO: Come up with a more elegant solution for this.
9671          */
9672         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9673                 struct amdgpu_framebuffer *old_afb, *new_afb;

9674                 if (other->type == DRM_PLANE_TYPE_CURSOR)
9675                         continue;
9676
9677                 if (old_other_state->crtc != new_plane_state->crtc &&
9678                     new_other_state->crtc != new_plane_state->crtc)
9679                         continue;
9680
9681                 if (old_other_state->crtc != new_other_state->crtc)
9682                         return true;
9683
9684                 /* Src/dst size and scaling updates. */
9685                 if (old_other_state->src_w != new_other_state->src_w ||
9686                     old_other_state->src_h != new_other_state->src_h ||
9687                     old_other_state->crtc_w != new_other_state->crtc_w ||
9688                     old_other_state->crtc_h != new_other_state->crtc_h)
9689                         return true;
9690
9691                 /* Rotation / mirroring updates. */
9692                 if (old_other_state->rotation != new_other_state->rotation)
9693                         return true;
9694
9695                 /* Blending updates. */
9696                 if (old_other_state->pixel_blend_mode !=
9697                     new_other_state->pixel_blend_mode)
9698                         return true;
9699
9700                 /* Alpha updates. */
9701                 if (old_other_state->alpha != new_other_state->alpha)
9702                         return true;
9703
9704                 /* Colorspace changes. */
9705                 if (old_other_state->color_range != new_other_state->color_range ||
9706                     old_other_state->color_encoding != new_other_state->color_encoding)
9707                         return true;
9708
9709                 /* Framebuffer checks come last. */
9710                 if (!old_other_state->fb || !new_other_state->fb)
9711                         continue;
9712
9713                 /* Pixel format changes can require bandwidth updates. */
9714                 if (old_other_state->fb->format != new_other_state->fb->format)
9715                         return true;
9716
9717                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9718                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9719
9720                 /* Tiling and DCC changes also require bandwidth updates. */
9721                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9722                     old_afb->base.modifier != new_afb->base.modifier)
9723                         return true;
9724         }
9725
9726         return false;
9727 }
9728
9729 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9730                               struct drm_plane_state *new_plane_state,
9731                               struct drm_framebuffer *fb)
9732 {
9733         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9734         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9735         unsigned int pitch;
9736         bool linear;
9737
9738         if (fb->width > new_acrtc->max_cursor_width ||
9739             fb->height > new_acrtc->max_cursor_height) {
9740                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9741                                  new_plane_state->fb->width,
9742                                  new_plane_state->fb->height);
9743                 return -EINVAL;
9744         }
9745         if (new_plane_state->src_w != fb->width << 16 ||
9746             new_plane_state->src_h != fb->height << 16) {
9747                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9748                 return -EINVAL;
9749         }
9750
9751         /* Pitch in pixels */
9752         pitch = fb->pitches[0] / fb->format->cpp[0];
9753
9754         if (fb->width != pitch) {
9755                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9756                                  fb->width, pitch);
9757                 return -EINVAL;
9758         }
9759
9760         switch (pitch) {
9761         case 64:
9762         case 128:
9763         case 256:
9764                 /* FB pitch is supported by cursor plane */
9765                 break;
9766         default:
9767                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9768                 return -EINVAL;
9769         }
9770
9771         /* Core DRM takes care of checking FB modifiers, so we only need to
9772          * check tiling flags when the FB doesn't have a modifier. */
9773         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9774                 if (adev->family < AMDGPU_FAMILY_AI) {
9775                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9776                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9777                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9778                 } else {
9779                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9780                 }
9781                 if (!linear) {
9782                         DRM_DEBUG_ATOMIC("Cursor FB not linear");
9783                         return -EINVAL;
9784                 }
9785         }
9786
9787         return 0;
9788 }
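
/*
 * Example of the rules enforced by dm_check_cursor_fb() above: an
 * ARGB8888 cursor FB (4 bytes per pixel) with pitches[0] = 256 bytes
 * has a pitch of 256 / 4 = 64 pixels, which must equal the FB width
 * and be one of the supported cursor pitches (64/128/256). A minimal
 * sketch of the same check; the example_* name is illustrative only:
 */
static inline bool example_cursor_pitch_ok(unsigned int width,
                                           unsigned int pitch_bytes,
                                           unsigned int cpp)
{
        unsigned int pitch = pitch_bytes / cpp; /* pitch in pixels */

        if (width != pitch)
                return false;

        return pitch == 64 || pitch == 128 || pitch == 256;
}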
9789
9790 static int dm_update_plane_state(struct dc *dc,
9791                                  struct drm_atomic_state *state,
9792                                  struct drm_plane *plane,
9793                                  struct drm_plane_state *old_plane_state,
9794                                  struct drm_plane_state *new_plane_state,
9795                                  bool enable,
9796                                  bool *lock_and_validation_needed)
9797 {
9799         struct dm_atomic_state *dm_state = NULL;
9800         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9801         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9802         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9803         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9804         struct amdgpu_crtc *new_acrtc;
9805         bool needs_reset;
9806         int ret = 0;
9807
9809         new_plane_crtc = new_plane_state->crtc;
9810         old_plane_crtc = old_plane_state->crtc;
9811         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9812         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9813
9814         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9815                 if (!enable || !new_plane_crtc ||
9816                         drm_atomic_plane_disabling(plane->state, new_plane_state))
9817                         return 0;
9818
9819                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9820
9821                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9822                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9823                         return -EINVAL;
9824                 }
9825
9826                 if (new_plane_state->fb) {
9827                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9828                                                  new_plane_state->fb);
9829                         if (ret)
9830                                 return ret;
9831                 }
9832
9833                 return 0;
9834         }
9835
9836         needs_reset = should_reset_plane(state, plane, old_plane_state,
9837                                          new_plane_state);
9838
9839         /* Remove any changed/removed planes */
9840         if (!enable) {
9841                 if (!needs_reset)
9842                         return 0;
9843
9844                 if (!old_plane_crtc)
9845                         return 0;
9846
9847                 old_crtc_state = drm_atomic_get_old_crtc_state(
9848                                 state, old_plane_crtc);
9849                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9850
9851                 if (!dm_old_crtc_state->stream)
9852                         return 0;
9853
9854                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9855                                 plane->base.id, old_plane_crtc->base.id);
9856
9857                 ret = dm_atomic_get_state(state, &dm_state);
9858                 if (ret)
9859                         return ret;
9860
9861                 if (!dc_remove_plane_from_context(
9862                                 dc,
9863                                 dm_old_crtc_state->stream,
9864                                 dm_old_plane_state->dc_state,
9865                                 dm_state->context)) {
9867                         return -EINVAL;
9868                 }
9869
9871                 dc_plane_state_release(dm_old_plane_state->dc_state);
9872                 dm_new_plane_state->dc_state = NULL;
9873
9874                 *lock_and_validation_needed = true;
9875
9876         } else { /* Add new planes */
9877                 struct dc_plane_state *dc_new_plane_state;
9878
9879                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9880                         return 0;
9881
9882                 if (!new_plane_crtc)
9883                         return 0;
9884
9885                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9886                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9887
9888                 if (!dm_new_crtc_state->stream)
9889                         return 0;
9890
9891                 if (!needs_reset)
9892                         return 0;
9893
9894                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9895                 if (ret)
9896                         return ret;
9897
9898                 WARN_ON(dm_new_plane_state->dc_state);
9899
9900                 dc_new_plane_state = dc_create_plane_state(dc);
9901                 if (!dc_new_plane_state)
9902                         return -ENOMEM;
9903
9904                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9905                                  plane->base.id, new_plane_crtc->base.id);
9906
9907                 ret = fill_dc_plane_attributes(
9908                         drm_to_adev(new_plane_crtc->dev),
9909                         dc_new_plane_state,
9910                         new_plane_state,
9911                         new_crtc_state);
9912                 if (ret) {
9913                         dc_plane_state_release(dc_new_plane_state);
9914                         return ret;
9915                 }
9916
9917                 ret = dm_atomic_get_state(state, &dm_state);
9918                 if (ret) {
9919                         dc_plane_state_release(dc_new_plane_state);
9920                         return ret;
9921                 }
9922
9923                 /*
9924                  * Any atomic check errors that occur after this will
9925                  * not need a release. The plane state will be attached
9926                  * to the stream, and therefore part of the atomic
9927                  * state. It'll be released when the atomic state is
9928                  * cleaned.
9929                  */
9930                 if (!dc_add_plane_to_context(
9931                                 dc,
9932                                 dm_new_crtc_state->stream,
9933                                 dc_new_plane_state,
9934                                 dm_state->context)) {
9935
9936                         dc_plane_state_release(dc_new_plane_state);
9937                         return -EINVAL;
9938                 }
9939
9940                 dm_new_plane_state->dc_state = dc_new_plane_state;
9941
9942                 /* Tell DC to do a full surface update every time there
9943                  * is a plane change. Inefficient, but works for now.
9944                  */
9945                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9946
9947                 *lock_and_validation_needed = true;
9948         }
9949
9951         return ret;
9952 }
9953
9954 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9955                                 struct drm_crtc *crtc,
9956                                 struct drm_crtc_state *new_crtc_state)
9957 {
9958         struct drm_plane_state *new_cursor_state, *new_primary_state;
9959         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9960
9961         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9962          * cursor per pipe but it's going to inherit the scaling and
9963          * positioning from the underlying pipe. Check that the cursor plane's
9964          * scaling matches the primary plane's. */
9965
9966         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9967         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9968         if (!new_cursor_state || !new_primary_state ||
9969             !new_cursor_state->fb || !new_primary_state->fb) {
9970                 return 0;
9971         }
9972
9973         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9974                          (new_cursor_state->src_w >> 16);
9975         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9976                          (new_cursor_state->src_h >> 16);
9977
9978         primary_scale_w = new_primary_state->crtc_w * 1000 /
9979                          (new_primary_state->src_w >> 16);
9980         primary_scale_h = new_primary_state->crtc_h * 1000 /
9981                          (new_primary_state->src_h >> 16);
9982
9983         if (cursor_scale_w != primary_scale_w ||
9984             cursor_scale_h != primary_scale_h) {
9985                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9986                 return -EINVAL;
9987         }
9988
9989         return 0;
9990 }
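
/*
 * Worked example for the scale check above, which compares scales in
 * units of 1/1000: a 64x64 cursor (src_w = 64 << 16) shown at 64x64 has
 * scale_w = 64 * 1000 / 64 = 1000, while a primary plane with a
 * 1920-pixel-wide source scanned out across 3840 pixels has
 * scale_w = 2000, so the mismatch is rejected with -EINVAL. A minimal
 * sketch of the per-axis math; the example_* name is illustrative only:
 */
static inline int example_plane_scale_x1000(unsigned int crtc_size,
                                            unsigned int src_size_16_16)
{
        /* plane src sizes are 16.16 fixed point, hence the shift */
        return crtc_size * 1000 / (src_size_16_16 >> 16);
}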
9991
9992 #if defined(CONFIG_DRM_AMD_DC_DCN)
9993 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9994 {
9995         struct drm_connector *connector;
9996         struct drm_connector_state *conn_state;
9997         struct amdgpu_dm_connector *aconnector = NULL;
9998         int i;

9999         for_each_new_connector_in_state(state, connector, conn_state, i) {
10000                 if (conn_state->crtc != crtc)
10001                         continue;
10002
10003                 aconnector = to_amdgpu_dm_connector(connector);
10004                 if (!aconnector->port || !aconnector->mst_port)
10005                         aconnector = NULL;
10006                 else
10007                         break;
10008         }
10009
10010         if (!aconnector)
10011                 return 0;
10012
10013         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10014 }
10015 #endif
10016
10017 static int validate_overlay(struct drm_atomic_state *state)
10018 {
10019         int i;
10020         struct drm_plane *plane;
10021         struct drm_plane_state *old_plane_state, *new_plane_state;
10022         struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
10023
10024         /* Find the overlay plane in the state, if any, bailing out early if it is being disabled */
10025         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10026                 if (plane->type != DRM_PLANE_TYPE_OVERLAY)
10027                         continue;
10028 
10029                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10030                         return 0;
10031 
10032                 overlay_state = new_plane_state;
10033         }
10034
10035         /* check if we're making changes to the overlay plane */
10036         if (!overlay_state)
10037                 return 0;
10038
10039         /* check if overlay plane is enabled */
10040         if (!overlay_state->crtc)
10041                 return 0;
10042
10043         /* find the primary plane for the CRTC that the overlay is enabled on */
10044         primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10045         if (IS_ERR(primary_state))
10046                 return PTR_ERR(primary_state);
10047
10048         /* check if primary plane is enabled */
10049         if (!primary_state->crtc)
10050                 return 0;
10051
10052         /* check if cursor plane is enabled */
10053         cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
10054         if (IS_ERR(cursor_state))
10055                 return PTR_ERR(cursor_state);
10056
              /* use the cursor plane's own old state; 'plane' is stale loop state here */
10057         if (drm_atomic_plane_disabling(overlay_state->crtc->cursor->state, cursor_state))
10058                 return 0;
10059
10060         /* Perform the bounds check to ensure the overlay plane covers the primary */
10061         if (primary_state->crtc_x < overlay_state->crtc_x ||
10062             primary_state->crtc_y < overlay_state->crtc_y ||
10063             primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10064             primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10065                 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10066                 return -EINVAL;
10067         }
10068
10069         return 0;
10070 }
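
/*
 * The bounds check in validate_overlay() above is plain rectangle
 * containment: the primary plane must lie fully inside the overlay so
 * that the overlay (which carries the hardware cursor) never leaves an
 * uncovered strip of the primary. A minimal sketch of the same test;
 * the example_* name is illustrative only:
 */
static inline bool example_rect_contains(int outer_x, int outer_y,
                                         int outer_w, int outer_h,
                                         int inner_x, int inner_y,
                                         int inner_w, int inner_h)
{
        return inner_x >= outer_x && inner_y >= outer_y &&
               inner_x + inner_w <= outer_x + outer_w &&
               inner_y + inner_h <= outer_y + outer_h;
}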
10071
10072 /**
10073  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10074  * @dev: The DRM device
10075  * @state: The atomic state to commit
10076  *
10077  * Validate that the given atomic state is programmable by DC into hardware.
10078  * This involves constructing a &struct dc_state reflecting the new hardware
10079  * state we wish to commit, then querying DC to see if it is programmable. It's
10080  * important not to modify the existing DC state. Otherwise, atomic_check
10081  * may unexpectedly commit hardware changes.
10082  *
10083  * When validating the DC state, it's important that the right locks are
10084  * acquired. For full updates case which removes/adds/updates streams on one
10085  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10086  * that any such full update commit will wait for completion of any outstanding
10087  * flip using DRMs synchronization events.
10088  *
10089  * Note that DM adds the affected connectors for all CRTCs in state, when that
10090  * might not seem necessary. This is because DC stream creation requires the
10091  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10092  * be possible but non-trivial - a possible TODO item.
10093  *
10094  * Return: 0 on success, or a negative error code if validation failed.
10095  */
10096 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10097                                   struct drm_atomic_state *state)
10098 {
10099         struct amdgpu_device *adev = drm_to_adev(dev);
10100         struct dm_atomic_state *dm_state = NULL;
10101         struct dc *dc = adev->dm.dc;
10102         struct drm_connector *connector;
10103         struct drm_connector_state *old_con_state, *new_con_state;
10104         struct drm_crtc *crtc;
10105         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10106         struct drm_plane *plane;
10107         struct drm_plane_state *old_plane_state, *new_plane_state;
10108         enum dc_status status;
10109         int ret, i;
10110         bool lock_and_validation_needed = false;
10111         struct dm_crtc_state *dm_old_crtc_state;
10112
10113         trace_amdgpu_dm_atomic_check_begin(state);
10114
10115         ret = drm_atomic_helper_check_modeset(dev, state);
10116         if (ret)
10117                 goto fail;
10118
10119         /* Check connector changes */
10120         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10121                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10122                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10123
10124                 /* Skip connectors that are disabled or part of modeset already. */
10125                 if (!old_con_state->crtc && !new_con_state->crtc)
10126                         continue;
10127
10128                 if (!new_con_state->crtc)
10129                         continue;
10130
10131                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10132                 if (IS_ERR(new_crtc_state)) {
10133                         ret = PTR_ERR(new_crtc_state);
10134                         goto fail;
10135                 }
10136
10137                 if (dm_old_con_state->abm_level !=
10138                     dm_new_con_state->abm_level)
10139                         new_crtc_state->connectors_changed = true;
10140         }
10141
10142 #if defined(CONFIG_DRM_AMD_DC_DCN)
10143         if (dc_resource_is_dsc_encoding_supported(dc)) {
10144                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10145                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10146                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10147                                 if (ret)
10148                                         goto fail;
10149                         }
10150                 }
10151         }
10152 #endif
10153         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10154                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10155
10156                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10157                     !new_crtc_state->color_mgmt_changed &&
10158                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10159                     !dm_old_crtc_state->dsc_force_changed)
10160                         continue;
10161
10162                 if (!new_crtc_state->enable)
10163                         continue;
10164
10165                 ret = drm_atomic_add_affected_connectors(state, crtc);
10166                 if (ret)
10167                         goto fail;
10168
10169                 ret = drm_atomic_add_affected_planes(state, crtc);
10170                 if (ret)
10171                         goto fail;
10172
10173                 if (dm_old_crtc_state->dsc_force_changed)
10174                         new_crtc_state->mode_changed = true;
10175         }
10176
10177         /*
10178          * Add all primary and overlay planes on the CRTC to the state
10179          * whenever a plane is enabled to maintain correct z-ordering
10180          * and to enable fast surface updates.
10181          */
10182         drm_for_each_crtc(crtc, dev) {
10183                 bool modified = false;
10184
10185                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10186                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10187                                 continue;
10188
10189                         if (new_plane_state->crtc == crtc ||
10190                             old_plane_state->crtc == crtc) {
10191                                 modified = true;
10192                                 break;
10193                         }
10194                 }
10195
10196                 if (!modified)
10197                         continue;
10198
10199                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10200                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10201                                 continue;
10202
10203                         new_plane_state =
10204                                 drm_atomic_get_plane_state(state, plane);
10205
10206                         if (IS_ERR(new_plane_state)) {
10207                                 ret = PTR_ERR(new_plane_state);
10208                                 goto fail;
10209                         }
10210                 }
10211         }
10212
10213         /* Remove existing planes if they are modified */
10214         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10215                 ret = dm_update_plane_state(dc, state, plane,
10216                                             old_plane_state,
10217                                             new_plane_state,
10218                                             false,
10219                                             &lock_and_validation_needed);
10220                 if (ret)
10221                         goto fail;
10222         }
10223
10224         /* Disable all crtcs which require disable */
10225         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10226                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10227                                            old_crtc_state,
10228                                            new_crtc_state,
10229                                            false,
10230                                            &lock_and_validation_needed);
10231                 if (ret)
10232                         goto fail;
10233         }
10234
10235         /* Enable all crtcs which require enable */
10236         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10237                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10238                                            old_crtc_state,
10239                                            new_crtc_state,
10240                                            true,
10241                                            &lock_and_validation_needed);
10242                 if (ret)
10243                         goto fail;
10244         }
10245
10246         ret = validate_overlay(state);
10247         if (ret)
10248                 goto fail;
10249
10250         /* Add new/modified planes */
10251         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10252                 ret = dm_update_plane_state(dc, state, plane,
10253                                             old_plane_state,
10254                                             new_plane_state,
10255                                             true,
10256                                             &lock_and_validation_needed);
10257                 if (ret)
10258                         goto fail;
10259         }
10260
10261         /* Run this here since we want to validate the streams we created */
10262         ret = drm_atomic_helper_check_planes(dev, state);
10263         if (ret)
10264                 goto fail;
10265
10266         /* Check cursor planes scaling */
10267         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10268                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10269                 if (ret)
10270                         goto fail;
10271         }
10272
10273         if (state->legacy_cursor_update) {
10274                 /*
10275                  * This is a fast cursor update coming from the plane update
10276                  * helper, check if it can be done asynchronously for better
10277                  * performance.
10278                  */
10279                 state->async_update =
10280                         !drm_atomic_helper_async_check(dev, state);
10281
10282                 /*
10283                  * Skip the remaining global validation if this is an async
10284                  * update. Cursor updates can be done without affecting
10285                  * state or bandwidth calcs and this avoids the performance
10286                  * penalty of locking the private state object and
10287                  * allocating a new dc_state.
10288                  */
10289                 if (state->async_update)
10290                         return 0;
10291         }
10292
10293         /* Check scaling and underscan changes */
10294         /* TODO Removed scaling changes validation due to inability to commit
10295          * a new stream into the context w/o causing a full reset. Need to
10296          * decide how to handle.
10297          */
10298         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10299                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10300                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10301                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10302
10303                 /* Skip any modesets/resets */
10304                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10305                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10306                         continue;
10307
10308                 /* Skip anything that is not a scaling or underscan change */
10309                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10310                         continue;
10311
10312                 lock_and_validation_needed = true;
10313         }
10314
10315         /*
10316          * Streams and planes are reset when there are changes that affect
10317          * bandwidth. Anything that affects bandwidth needs to go through
10318          * DC global validation to ensure that the configuration can be applied
10319          * to hardware.
10320          *
10321          * We have to currently stall out here in atomic_check for outstanding
10322          * commits to finish in this case because our IRQ handlers reference
10323          * DRM state directly - we can end up disabling interrupts too early
10324          * if we don't.
10325          *
10326          * TODO: Remove this stall and drop DM state private objects.
10327          */
10328         if (lock_and_validation_needed) {
10329                 ret = dm_atomic_get_state(state, &dm_state);
10330                 if (ret)
10331                         goto fail;
10332
10333                 ret = do_aquire_global_lock(dev, state);
10334                 if (ret)
10335                         goto fail;
10336
10337 #if defined(CONFIG_DRM_AMD_DC_DCN)
10338                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
                              ret = -EINVAL;
10339                         goto fail;
                      }
10340
10341                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10342                 if (ret)
10343                         goto fail;
10344 #endif
10345
10346                 /*
10347                  * Perform validation of MST topology in the state:
10348                  * We need to perform MST atomic check before calling
10349                  * dc_validate_global_state(), or there is a chance
10350                  * to get stuck in an infinite loop and hang eventually.
10351                  */
10352                 ret = drm_dp_mst_atomic_check(state);
10353                 if (ret)
10354                         goto fail;
10355                 status = dc_validate_global_state(dc, dm_state->context, false);
10356                 if (status != DC_OK) {
10357                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
10358                                        dc_status_to_str(status), status);
10359                         ret = -EINVAL;
10360                         goto fail;
10361                 }
10362         } else {
10363                 /*
10364                  * The commit is a fast update. Fast updates shouldn't change
10365                  * the DC context, affect global validation, and can have their
10366                  * commit work done in parallel with other commits not touching
10367                  * the same resource. If we have a new DC context as part of
10368                  * the DM atomic state from validation we need to free it and
10369                  * retain the existing one instead.
10370                  *
10371                  * Furthermore, since the DM atomic state only contains the DC
10372                  * context and can safely be annulled, we can free the state
10373                  * and clear the associated private object now to free
10374                  * some memory and avoid a possible use-after-free later.
10375                  */
10376
10377                 for (i = 0; i < state->num_private_objs; i++) {
10378                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10379
10380                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
10381                                 int j = state->num_private_objs - 1;
10382
10383                                 dm_atomic_destroy_state(obj,
10384                                                 state->private_objs[i].state);
10385
10386                                 /* If i is not at the end of the array then the
10387                                  * last element needs to be moved to where i was
10388                                  * before the array can safely be truncated (see
10389                                  * the sketch after this function). */
10390                                 if (i != j)
10391                                         state->private_objs[i] =
10392                                                 state->private_objs[j];
10393
10394                                 state->private_objs[j].ptr = NULL;
10395                                 state->private_objs[j].state = NULL;
10396                                 state->private_objs[j].old_state = NULL;
10397                                 state->private_objs[j].new_state = NULL;
10398
10399                                 state->num_private_objs = j;
10400                                 break;
10401                         }
10402                 }
10403         }
10404
10405         /* Store the overall update type for use later in atomic check. */
10406         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10407                 struct dm_crtc_state *dm_new_crtc_state =
10408                         to_dm_crtc_state(new_crtc_state);
10409
10410                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10411                                                          UPDATE_TYPE_FULL :
10412                                                          UPDATE_TYPE_FAST;
10413         }
10414
10415         /* Must be success, i.e. ret == 0 at this point */
10416         WARN_ON(ret);
10417
10418         trace_amdgpu_dm_atomic_check_finish(state, ret);
10419
10420         return ret;
10421
10422 fail:
10423         if (ret == -EDEADLK)
10424                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10425         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10426                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10427         else
10428                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10429
10430         trace_amdgpu_dm_atomic_check_finish(state, ret);
10431
10432         return ret;
10433 }
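
/*
 * The private-object cleanup in amdgpu_dm_atomic_check() above removes
 * one array element using the classic "swap with last and shrink"
 * idiom: O(1) removal that does not preserve element order. A minimal
 * generic sketch of that idiom; example_swap_remove() is illustrative
 * and not driver code:
 */
static inline void example_swap_remove(int *arr, int *len, int i)
{
        int j = *len - 1;

        if (i != j)
                arr[i] = arr[j];        /* move the last element into the hole */

        arr[j] = 0;                     /* clear the vacated tail slot */
        *len = j;                       /* truncate the array by one */
}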
10434
10435 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10436                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
10437 {
10438         uint8_t dpcd_data;
10439         bool capable = false;
10440
10441         if (amdgpu_dm_connector->dc_link &&
10442                 dm_helpers_dp_read_dpcd(
10443                                 NULL,
10444                                 amdgpu_dm_connector->dc_link,
10445                                 DP_DOWN_STREAM_PORT_COUNT,
10446                                 &dpcd_data,
10447                                 sizeof(dpcd_data))) {
10448                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10449         }
10450
10451         return capable;
10452 }
10453
10454 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10455                 uint8_t *edid_ext, int len,
10456                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10457 {
10458         int i;
10459         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10460         struct dc *dc = adev->dm.dc;
10461
10462         /* send extension block to DMCU for parsing */
10463         for (i = 0; i < len; i += 8) {
10464                 bool res;
10465                 int offset;
10466
10467                 /* send 8 bytes at a time */
10468                 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10469                         return false;
10470
10471                 if (i + 8 == len) {
10472                         /* EDID block fully sent, expect result */
10473                         int version, min_rate, max_rate;
10474
10475                         res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10476                         if (res) {
10477                                 /* amd vsdb found */
10478                                 vsdb_info->freesync_supported = 1;
10479                                 vsdb_info->amd_vsdb_version = version;
10480                                 vsdb_info->min_refresh_rate_hz = min_rate;
10481                                 vsdb_info->max_refresh_rate_hz = max_rate;
10482                                 return true;
10483                         }
10484                         /* not amd vsdb */
10485                         return false;
10486                 }
10487
10488                 /* check for ack */
10489                 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10490                 if (!res)
10491                         return false;
10492         }
10493
10494         return false;
10495 }
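
/*
 * parse_edid_cea() above streams the CEA extension block to the DMCU in
 * 8-byte chunks: a standard 128-byte EDID extension takes 16 writes,
 * each chunk but the last is acknowledged, and the final one returns
 * the parsed AMD VSDB result instead. A minimal sketch of the chunking
 * loop; example_send_chunks() and its send() callback are illustrative,
 * not the real DMCU interface:
 */
static inline bool example_send_chunks(const uint8_t *buf, int len,
                                       bool (*send)(int offset,
                                                    const uint8_t *chunk,
                                                    int size))
{
        int i;

        for (i = 0; i < len; i += 8)    /* 8 bytes at a time */
                if (!send(i, &buf[i], 8))
                        return false;

        return true;
}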
10496
10497 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10498                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10499 {
10500         uint8_t *edid_ext = NULL;
10501         int i;
10502         bool valid_vsdb_found = false;
10503
10504         /*----- drm_find_cea_extension() -----*/
10505         /* No EDID or EDID extensions */
10506         if (edid == NULL || edid->extensions == 0)
10507                 return -ENODEV;
10508
10509         /* Find CEA extension */
10510         for (i = 0; i < edid->extensions; i++) {
10511                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10512                 if (edid_ext[0] == CEA_EXT)
10513                         break;
10514         }
10515
10516         if (i == edid->extensions)
10517                 return -ENODEV;
10518
10519         /*----- cea_db_offsets() -----*/
10520         if (edid_ext[0] != CEA_EXT)
10521                 return -ENODEV;
10522
10523         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10524
10525         return valid_vsdb_found ? i : -ENODEV;
10526 }
10527
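/*
 * amdgpu_dm_update_freesync_caps() - refresh a connector's FreeSync state
 *
 * For DP/eDP sinks that ignore MSA timing parameters, the vertical
 * refresh range comes from the EDID monitor range descriptor; for HDMI
 * sinks it comes from the AMD VSDB, if present. The connector is
 * reported FreeSync-capable only when the range spans more than 10 Hz.
 */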
10528 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10529                                         struct edid *edid)
10530 {
10531         int i = 0;
10532         struct detailed_timing *timing;
10533         struct detailed_non_pixel *data;
10534         struct detailed_data_monitor_range *range;
10535         struct amdgpu_dm_connector *amdgpu_dm_connector =
10536                         to_amdgpu_dm_connector(connector);
10537         struct dm_connector_state *dm_con_state = NULL;
10538
10539         struct drm_device *dev = connector->dev;
10540         struct amdgpu_device *adev = drm_to_adev(dev);
10541         bool freesync_capable = false;
10542         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10543
10544         if (!connector->state) {
10545                 DRM_ERROR("%s - Connector has no state\n", __func__);
10546                 goto update;
10547         }
10548
10549         if (!edid) {
10550                 dm_con_state = to_dm_connector_state(connector->state);
10551
10552                 amdgpu_dm_connector->min_vfreq = 0;
10553                 amdgpu_dm_connector->max_vfreq = 0;
10554                 amdgpu_dm_connector->pixel_clock_mhz = 0;
10555
10556                 goto update;
10557         }
10558
10559         dm_con_state = to_dm_connector_state(connector->state);
10560
10561         if (!amdgpu_dm_connector->dc_sink) {
10562                 DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
10563                 goto update;
10564         }
10565         if (!adev->dm.freesync_module)
10566                 goto update;
10567
10569         if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10570                 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10571                 bool edid_check_required = false;
10572
10573                 if (edid) {
10574                         edid_check_required = is_dp_capable_without_timing_msa(
10575                                                 adev->dm.dc,
10576                                                 amdgpu_dm_connector);
10577                 }
10578
10579                 if (edid_check_required && (edid->version > 1 ||
10580                    (edid->version == 1 && edid->revision > 1))) {
10581                         for (i = 0; i < 4; i++) {
10582
10583                                 timing  = &edid->detailed_timings[i];
10584                                 data    = &timing->data.other_data;
10585                                 range   = &data->data.range;
10586                                 /*
10587                                  * Check if monitor has continuous frequency mode
10588                                  */
10589                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10590                                         continue;
10591                                 /*
10592                                  * Check for flag range limits only. If flag == 1 then
10593                                  * no additional timing information provided.
10594                                  * Default GTF, GTF Secondary curve and CVT are not
10595                                  * supported
10596                                  */
10597                                 if (range->flags != 1)
10598                                         continue;
10599
10600                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10601                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10602                                 amdgpu_dm_connector->pixel_clock_mhz =
10603                                         range->pixel_clock_mhz * 10;
10604
10605                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10606                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10607
10608                                 break;
10609                         }
10610
10611                         if (amdgpu_dm_connector->max_vfreq -
10612                             amdgpu_dm_connector->min_vfreq > 10)
10614                                 freesync_capable = true;
10616                 }
10617         } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10618                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10619                 if (i >= 0 && vsdb_info.freesync_supported) {
10623                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10624                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10625                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10626                                 freesync_capable = true;
10627
10628                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10629                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10630                 }
10631         }
10632
10633 update:
10634         if (dm_con_state)
10635                 dm_con_state->freesync_capable = freesync_capable;
10636
10637         if (connector->vrr_capable_property)
10638                 drm_connector_set_vrr_capable_property(connector,
10639                                                        freesync_capable);
10640 }
10641
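/*
 * amdgpu_dm_set_psr_caps() - cache the eDP sink's PSR capability
 *
 * Reads DP_PSR_SUPPORT from the sink's DPCD and records the reported
 * PSR version. A version of 0 leaves PSR disabled; any non-zero
 * version is treated as PSR version 1 here.
 */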
10642 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10643 {
10644         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10645
10646         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10647                 return;
10648         if (link->type == dc_connection_none)
10649                 return;
10650         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10651                                         dpcd_data, sizeof(dpcd_data))) {
10652                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10653
10654                 if (dpcd_data[0] == 0) {
10655                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10656                         link->psr_settings.psr_feature_enabled = false;
10657                 } else {
10658                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
10659                         link->psr_settings.psr_feature_enabled = true;
10660                 }
10661
10662                 DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
10663         }
10664 }
10665
10666 /*
10667  * amdgpu_dm_link_setup_psr() - configure psr link
10668  * @stream: stream state
10669  *
10670  * Return: true if success
10671  */
10672 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10673 {
10674         struct dc_link *link = NULL;
10675         struct psr_config psr_config = {0};
10676         struct psr_context psr_context = {0};
10677         bool ret = false;
10678
10679         if (stream == NULL)
10680                 return false;
10681
10682         link = stream->link;
10683
10684         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10685
10686         if (psr_config.psr_version > 0) {
10687                 psr_config.psr_exit_link_training_required = 0x1;
10688                 psr_config.psr_frame_capture_indication_req = 0;
10689                 psr_config.psr_rfb_setup_time = 0x37;
10690                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10691                 psr_config.allow_smu_optimizations = 0x0;
10692
10693                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10694
10695         }
10696         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10697
10698         return ret;
10699 }
10700
10701 /*
10702  * amdgpu_dm_psr_enable() - enable psr f/w
10703  * @stream: stream state
10704  *
10705  * Return: true if success
10706  */
10707 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10708 {
10709         struct dc_link *link = stream->link;
10710         unsigned int vsync_rate_hz = 0;
10711         struct dc_static_screen_params params = {0};
10712         /* Calculate the number of static frames before generating an
10713          * interrupt to enter PSR; initialize with a failsafe default of
10714          * 2 static frames.
10715          */
10716         unsigned int num_frames_static = 2;
10717
10718         DRM_DEBUG_DRIVER("Enabling psr...\n");
10719
10720         vsync_rate_hz = div64_u64(div64_u64(
10721                         stream->timing.pix_clk_100hz * 100,
10722                         stream->timing.v_total),
10723                         stream->timing.h_total);
10724
10725         /*
10726          * Calculate the number of frames such that at least 30 ms of time
10727          * has passed, rounding up.
10728          */
10729         if (vsync_rate_hz != 0) {
10730                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10731                 num_frames_static = (30000 / frame_time_microsec) + 1;
10732         }
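        /*
         * Worked example, assuming a 60 Hz mode: frame_time_microsec is
         * 1000000 / 60 = 16666, so num_frames_static becomes
         * 30000 / 16666 + 1 = 2 frames, i.e. ~33 ms of static screen
         * before PSR entry is requested.
         */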
10733
10734         params.triggers.cursor_update = true;
10735         params.triggers.overlay_update = true;
10736         params.triggers.surface_update = true;
10737         params.num_frames = num_frames_static;
10738
10739         dc_stream_set_static_screen_params(link->ctx->dc,
10740                                            &stream, 1,
10741                                            &params);
10742
10743         return dc_link_set_psr_allow_active(link, true, false, false);
10744 }
10745
10746 /*
10747  * amdgpu_dm_psr_disable() - disable psr f/w
10748  * @stream:  stream state
10749  *
10750  * Return: true if success
10751  */
10752 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10753 {
10755         DRM_DEBUG_DRIVER("Disabling psr...\n");
10756
10757         return dc_link_set_psr_allow_active(stream->link, false, true, false);
10758 }
10759
10760 /*
10761  * amdgpu_dm_psr_disable_all() - disable psr f/w on all streams
10762  * @dm: display manager whose streams should have psr disabled
10763  *
10764  * Return: true if success
10765  */
10766 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10767 {
10768         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10769         return dc_set_psr_allow_active(dm->dc, false);
10770 }
10771
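/*
 * amdgpu_dm_trigger_timing_sync() - force CRTC timing resynchronization
 *
 * Propagates the force_timing_sync setting to every stream in the
 * current DC state, then re-arms per-frame CRTC master sync and
 * triggers the sync itself, all under the dc_lock.
 */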
10772 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10773 {
10774         struct amdgpu_device *adev = drm_to_adev(dev);
10775         struct dc *dc = adev->dm.dc;
10776         int i;
10777
10778         mutex_lock(&adev->dm.dc_lock);
10779         if (dc->current_state) {
10780                 for (i = 0; i < dc->current_state->stream_count; ++i)
10781                         dc->current_state->streams[i]
10782                                 ->triggered_crtc_reset.enabled =
10783                                 adev->dm.force_timing_sync;
10784
10785                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10786                 dc_trigger_sync(dc, dc->current_state);
10787         }
10788         mutex_unlock(&adev->dm.dc_lock);
10789 }
10790
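/*
 * dm_write_reg_func() - DC register write callback with tracing
 *
 * Writes @value to @address through the CGS layer and records the
 * access via the amdgpu_dc_wreg tracepoint.
 */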
10791 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10792                        uint32_t value, const char *func_name)
10793 {
10794 #ifdef DM_CHECK_ADDR_0
10795         if (address == 0) {
10796                 DC_ERR("invalid register write. address = 0");
10797                 return;
10798         }
10799 #endif
10800         cgs_write_register(ctx->cgs_device, address, value);
10801         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10802 }
10803
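/*
 * dm_read_reg_func() - DC register read callback with tracing
 *
 * Reads @address through the CGS layer and records the access via the
 * amdgpu_dc_rreg tracepoint. A read attempted while a DMUB
 * register-gather offload is in progress (and not burst-writing)
 * trips an assert and returns 0 instead.
 */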
10804 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10805                           const char *func_name)
10806 {
10807         uint32_t value;
10808 #ifdef DM_CHECK_ADDR_0
10809         if (address == 0) {
10810                 DC_ERR("invalid register read; address = 0\n");
10811                 return 0;
10812         }
10813 #endif
10814
10815         if (ctx->dmub_srv &&
10816             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10817             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10818                 ASSERT(false);
10819                 return 0;
10820         }
10821
10822         value = cgs_read_register(ctx->cgs_device, address);
10823
10824         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10825
10826         return value;
10827 }
10828
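/*
 * amdgpu_dm_process_dmub_aux_transfer_sync() - synchronous AUX via DMUB
 *
 * Kicks off an asynchronous AUX transfer on the DMUB and blocks on the
 * dmub_aux_transfer_done completion for up to 10 seconds. On success
 * the reply command is handed back through @payload and, for reads,
 * the reply data is copied out as well.
 *
 * Return: the AUX reply length, or -1 on timeout.
 */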
10829 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10830                                 struct aux_payload *payload, enum aux_return_code_type *operation_result)
10831 {
10832         struct amdgpu_device *adev = ctx->driver_context;
10833         int ret = 0;
10834
10835         dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10836         ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
10837         if (ret == 0) {
10838                 *operation_result = AUX_RET_ERROR_TIMEOUT;
10839                 return -1;
10840         }
10841         *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10842
10843         if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10844                 (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10845
10846                 /* for reads, copy the reply data back into the payload */
10847                 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10848                     (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10849                         memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10850                                adev->dm.dmub_notify->aux_reply.length);
10851         }
10852
10853         return adev->dm.dmub_notify->aux_reply.length;
10854 }