/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "soc15_common.h"
#include "vega10_ip_offset.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

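/* Map the dongle type reported in the link's DPCD caps to a DRM subconnector type. */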
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
}

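/*
 * Read back the current scanout position from DC. *position packs the
 * vertical position in the low 16 bits and the horizontal position in the
 * high 16 bits; *vbl packs v_blank_start/v_blank_end the same way.
 */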
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

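/*
 * Look up the amdgpu_crtc driven by the given OTG instance. Falls back to
 * the first CRTC (with a warning) when the OTG instance is unknown (-1).
 */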
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (WARN_ON(otg_inst == -1))
                return adev->mode_info.crtcs[0];

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

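/*
 * Helpers to check whether variable refresh rate is active, i.e. the
 * freesync state is either ACTIVE_VARIABLE or ACTIVE_FIXED. The _irq
 * variant reads the interrupt-safe copy in dm_irq_params.
 */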
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
                return true;
        else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
                return true;
        else
                return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                             amdgpu_crtc->pflip_status,
                             AMDGPU_FLIP_SUBMITTED,
                             amdgpu_crtc->crtc_id,
                             amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                     amdgpu_crtc->crtc_id, amdgpu_crtc,
                     vrr_active, (int)!e);
}

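/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * Handles the VUPDATE interrupt. Tracks frame durations for refresh rate
 * tracing and, in VRR mode, performs core vblank handling after the end of
 * the front-porch, including BTR processing on pre-DCE12 ASICs.
 */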
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                frame_duration_ns,
                                                ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
                 * while now done after front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, to be read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
                                        struct dmub_notification *notify)
{
        if (adev->dm.dmub_notify)
                memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
        if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
                complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index from the
 * link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
                              struct dmub_notification *notify)
{
        struct amdgpu_dm_connector *aconnector;
        struct amdgpu_dm_connector *hpd_aconnector = NULL;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct dc_link *link;
        uint8_t link_index = 0;
        struct drm_device *dev;

        if (adev == NULL)
                return;

        if (notify == NULL) {
                DRM_ERROR("DMUB HPD callback notification was NULL");
                return;
        }

        if (notify->link_index >= adev->dm.dc->link_count) {
                DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
                return;
        }

        link_index = notify->link_index;
        link = adev->dm.dc->links[link_index];
        dev = adev->dm.ddev;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (link && aconnector->dc_link == link) {
                        DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
                        hpd_aconnector = aconnector;
                        break;
                }
        }
        drm_connector_list_iter_end(&iter);

        if (hpd_aconnector) {
                if (notify->type == DMUB_NOTIFICATION_HPD)
                        handle_hpd_irq_helper(hpd_aconnector);
                else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
                        handle_hpd_rx_irq(hpd_aconnector);
        }
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
                                          enum dmub_notification_type type,
                                          dmub_notify_interrupt_callback_t callback,
                                          bool dmub_int_thread_offload)
{
        if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
                adev->dm.dmub_callback[type] = callback;
                adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
        } else {
                return false;
        }

        return true;
}

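/* Worker that dispatches an offloaded DMUB notification to its registered callback. */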
static void dm_handle_hpd_work(struct work_struct *work)
{
        struct dmub_hpd_work *dmub_hpd_wrk;

        dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

        if (!dmub_hpd_wrk->dmub_notify) {
                DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
                return;
        }

        if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
                dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
                dmub_hpd_wrk->dmub_notify);
        }

        kfree(dmub_hpd_wrk->dmub_notify);
        kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by dispatching DMUB notifications and
 * draining the DMUB trace buffer.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
        struct dmub_notification notify;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        uint32_t count = 0;
        struct dmub_hpd_work *dmub_hpd_wrk;
        struct dc_link *plink = NULL;

        if (dc_enable_dmub_notifications(adev->dm.dc) &&
                irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

                do {
                        dc_stat_get_dmub_notification(adev->dm.dc, &notify);
                        if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
                                DRM_ERROR("DM: notify type %d invalid!", notify.type);
                                continue;
                        }
                        if (!dm->dmub_callback[notify.type]) {
                                DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
                                continue;
                        }
                        if (dm->dmub_thread_offload[notify.type]) {
                                dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
                                if (!dmub_hpd_wrk) {
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk");
                                        return;
                                }
                                dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
                                if (!dmub_hpd_wrk->dmub_notify) {
                                        kfree(dmub_hpd_wrk);
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
                                        return;
                                }
                                INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
                                memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
                                dmub_hpd_wrk->adev = adev;
                                if (notify.type == DMUB_NOTIFICATION_HPD) {
                                        plink = adev->dm.dc->links[notify.link_index];
                                        if (plink) {
                                                plink->hpd_status =
                                                        notify.hpd_status == DP_HPD_PLUG;
                                        }
                                }
                                queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
                        } else {
                                dm->dmub_callback[notify.type](adev, &notify);
                        }
                } while (notify.pending_notification);
        }

        do {
                if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
                        trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                        entry.param0, entry.param1);

                        DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
                } else {
                        break;
                }

                count++;

        } while (count <= DMUB_TRACE_MAX_READ);

        if (count > DMUB_TRACE_MAX_READ)
                DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r) {
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                } else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

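/*
 * .get_eld hook of the audio component: find the connector bound to the
 * given audio pin and copy its ELD into @buf for the HDA driver.
 */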
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

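/*
 * Bring up the DMUB service: reset the DMCUB, copy firmware and VBIOS data
 * into the framebuffer windows, initialize the hardware and wait for the
 * firmware auto-load to finish.
 */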
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        /* Reset DMCUB if it was previously running - before we overwrite its memory. */
        status = dmub_srv_hw_reset(dmub_srv);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Error resetting DMUB HW: %d\n", status);

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                                fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        switch (adev->ip_versions[DCE_HWIP][0]) {
        case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
                hw_params.dpia_supported = true;
                hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
                break;
        default:
                break;
        }

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        if (!adev->dm.dc->ctx->dmub_srv)
                adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

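/*
 * On resume, skip the full init if DMUB hardware is already initialized and
 * only wait for the firmware auto-load; otherwise re-run dm_dmub_hw_init().
 */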
static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        enum dmub_status status;
        bool init;

        if (!dmub_srv) {
                /* DMUB isn't supported on the ASIC. */
                return;
        }

        status = dmub_srv_is_hw_init(dmub_srv, &init);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("DMUB hardware init check failed: %d\n", status);

        if (status == DMUB_STATUS_OK && init) {
                /* Wait for firmware load to finish. */
                status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
                if (status != DMUB_STATUS_OK)
                        DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
        } else {
                /* Perform the full hardware initialization. */
                dm_dmub_hw_init(adev);
        }
}

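/*
 * Translate the GMC address space layout (system aperture, AGP window and
 * GART page table) into the dc_phy_addr_space_config consumed by DC.
 */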
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        memset(pa_config, 0, sizeof(*pa_config));

        logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that it is unable to use the vram which
                 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
                 * workaround that increase system aperture high address (add 1)
                 * to get rid of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}

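/*
 * Deferred vblank work: maintains the active vblank IRQ count, toggles DC
 * idle optimizations (MALL) accordingly and enables/disables PSR based on
 * the OS vblank requirements.
 */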
static void vblank_control_worker(struct work_struct *work)
{
        struct vblank_control_work *vblank_work =
                container_of(work, struct vblank_control_work, work);
        struct amdgpu_display_manager *dm = vblank_work->dm;

        mutex_lock(&dm->dc_lock);

        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
        else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;

        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

        /*
         * Control PSR based on vblank requirements from OS
         *
         * If panel supports PSR SU, there's no need to disable PSR when OS is
         * submitting fast atomic commits (we infer this by whether the OS
         * requests vblank events). Fast atomic commits will simply trigger a
         * full-frame-update (FFU); a specific case of selective-update (SU)
         * where the SU region is the full hactive*vactive region. See
         * fill_dc_dirty_rects().
         */
        if (vblank_work->stream && vblank_work->stream->link) {
                if (vblank_work->enable) {
                        if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
                            vblank_work->stream->link->psr_settings.psr_allow_active)
                                amdgpu_dm_psr_disable(vblank_work->stream);
                } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
                           !vblank_work->stream->link->psr_settings.psr_allow_active &&
                           vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
                        amdgpu_dm_psr_enable(vblank_work->stream);
                }
        }

        mutex_unlock(&dm->dc_lock);

        dc_stream_release(vblank_work->stream);

        kfree(vblank_work);
}

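/*
 * Deferred HPD RX IRQ work: re-detects the sink and, outside of GPU reset,
 * handles DP automated test requests or link loss.
 */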
1299 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1300 {
1301         struct hpd_rx_irq_offload_work *offload_work;
1302         struct amdgpu_dm_connector *aconnector;
1303         struct dc_link *dc_link;
1304         struct amdgpu_device *adev;
1305         enum dc_connection_type new_connection_type = dc_connection_none;
1306         unsigned long flags;
1307
1308         offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1309         aconnector = offload_work->offload_wq->aconnector;
1310
1311         if (!aconnector) {
1312                 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1313                 goto skip;
1314         }
1315
1316         adev = drm_to_adev(aconnector->base.dev);
1317         dc_link = aconnector->dc_link;
1318
1319         mutex_lock(&aconnector->hpd_lock);
1320         if (!dc_link_detect_sink(dc_link, &new_connection_type))
1321                 DRM_ERROR("KMS: Failed to detect connector\n");
1322         mutex_unlock(&aconnector->hpd_lock);
1323
1324         if (new_connection_type == dc_connection_none)
1325                 goto skip;
1326
1327         if (amdgpu_in_reset(adev))
1328                 goto skip;
1329
1330         mutex_lock(&adev->dm.dc_lock);
1331         if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1332                 dc_link_dp_handle_automated_test(dc_link);
1333         else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1334                         hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1335                         dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1336                 dc_link_dp_handle_link_loss(dc_link);
1337                 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1338                 offload_work->offload_wq->is_handling_link_loss = false;
1339                 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1340         }
1341         mutex_unlock(&adev->dm.dc_lock);
1342
1343 skip:
1344         kfree(offload_work);
1346 }
1347
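/*
 * Allocate one single-threaded workqueue (and its spinlock) per DC link so
 * that HPD RX IRQ handling can be deferred out of interrupt context.
 */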
1348 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1349 {
1350         int max_caps = dc->caps.max_links;
1351         int i = 0;
1352         struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1353
1354         hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1355
1356         if (!hpd_rx_offload_wq)
1357                 return NULL;
1358
1359
1360         for (i = 0; i < max_caps; i++) {
1361                 hpd_rx_offload_wq[i].wq =
1362                                     create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1363
1364                 if (hpd_rx_offload_wq[i].wq == NULL) {
1365                         DRM_ERROR("failed to create amdgpu_dm_hpd_rx_offload_wq!\n");
1366                         goto out_err;
1367                 }
1368
1369                 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1370         }
1371
1372         return hpd_rx_offload_wq;

out_err:
        /* Unwind: destroy the workqueues created so far so none are leaked. */
        while (--i >= 0)
                destroy_workqueue(hpd_rx_offload_wq[i].wq);
        kfree(hpd_rx_offload_wq);
        return NULL;
1373 }
1374
1375 struct amdgpu_stutter_quirk {
1376         u16 chip_vendor;
1377         u16 chip_device;
1378         u16 subsys_vendor;
1379         u16 subsys_device;
1380         u8 revision;
1381 };
1382
1383 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1384         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1385         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1386         { 0, 0, 0, 0, 0 },
1387 };
1388
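/*
 * Returns true if the device matches an entry in the stutter quirk list
 * above, i.e. memory stutter mode should be kept disabled on this board.
 */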
1389 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1390 {
1391         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1392
1393         while (p && p->chip_device != 0) {
1394                 if (pdev->vendor == p->chip_vendor &&
1395                     pdev->device == p->chip_device &&
1396                     pdev->subsystem_vendor == p->subsys_vendor &&
1397                     pdev->subsystem_device == p->subsys_device &&
1398                     pdev->revision == p->revision) {
1399                         return true;
1400                 }
1401                 ++p;
1402         }
1403         return false;
1404 }
1405
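/*
 * Create the Display Core (DC) instance and bring up all DM state around it:
 * locks, IRQ handling, DMUB, the freesync and color-management modules, the
 * HDCP/vblank/HPD workqueues and the DRM device glue. Called from dm_hw_init().
 */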
1406 static int amdgpu_dm_init(struct amdgpu_device *adev)
1407 {
1408         struct dc_init_data init_data;
1409 #ifdef CONFIG_DRM_AMD_DC_HDCP
1410         struct dc_callback_init init_params;
1411 #endif
1412         int r;
1413
1414         adev->dm.ddev = adev_to_drm(adev);
1415         adev->dm.adev = adev;
1416
1417         /* Zero all the fields */
1418         memset(&init_data, 0, sizeof(init_data));
1419 #ifdef CONFIG_DRM_AMD_DC_HDCP
1420         memset(&init_params, 0, sizeof(init_params));
1421 #endif
1422
1423         mutex_init(&adev->dm.dc_lock);
1424         mutex_init(&adev->dm.audio_lock);
1425         spin_lock_init(&adev->dm.vblank_lock);
1426
1427         if (amdgpu_dm_irq_init(adev)) {
1428                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1429                 goto error;
1430         }
1431
1432         init_data.asic_id.chip_family = adev->family;
1433
1434         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1435         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1436         init_data.asic_id.chip_id = adev->pdev->device;
1437
1438         init_data.asic_id.vram_width = adev->gmc.vram_width;
1439         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1440         init_data.asic_id.atombios_base_address =
1441                 adev->mode_info.atom_context->bios;
1442
1443         init_data.driver = adev;
1444
1445         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1446
1447         if (!adev->dm.cgs_device) {
1448                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1449                 goto error;
1450         }
1451
1452         init_data.cgs_device = adev->dm.cgs_device;
1453
1454         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1455
1456         switch (adev->ip_versions[DCE_HWIP][0]) {
1457         case IP_VERSION(2, 1, 0):
1458                 switch (adev->dm.dmcub_fw_version) {
1459                 case 0: /* development */
1460                 case 0x1: /* linux-firmware.git hash 6d9f399 */
1461                 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1462                         init_data.flags.disable_dmcu = false;
1463                         break;
1464                 default:
1465                         init_data.flags.disable_dmcu = true;
1466                 }
1467                 break;
1468         case IP_VERSION(2, 0, 3):
1469                 init_data.flags.disable_dmcu = true;
1470                 break;
1471         default:
1472                 break;
1473         }
1474
1475         switch (adev->asic_type) {
1476         case CHIP_CARRIZO:
1477         case CHIP_STONEY:
1478                 init_data.flags.gpu_vm_support = true;
1479                 break;
1480         default:
1481                 switch (adev->ip_versions[DCE_HWIP][0]) {
1482                 case IP_VERSION(1, 0, 0):
1483                 case IP_VERSION(1, 0, 1):
1484                         /* enable S/G on PCO and RV2 */
1485                         if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1486                             (adev->apu_flags & AMD_APU_IS_PICASSO))
1487                                 init_data.flags.gpu_vm_support = true;
1488                         break;
1489                 case IP_VERSION(2, 1, 0):
1490                 case IP_VERSION(3, 0, 1):
1491                 case IP_VERSION(3, 1, 2):
1492                 case IP_VERSION(3, 1, 3):
1493                 case IP_VERSION(3, 1, 5):
1494                 case IP_VERSION(3, 1, 6):
1495                         init_data.flags.gpu_vm_support = true;
1496                         break;
1497                 default:
1498                         break;
1499                 }
1500                 break;
1501         }
1502
1503         if (init_data.flags.gpu_vm_support)
1504                 adev->mode_info.gpu_vm_support = true;
1505
1506         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1507                 init_data.flags.fbc_support = true;
1508
1509         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1510                 init_data.flags.multi_mon_pp_mclk_switch = true;
1511
1512         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1513                 init_data.flags.disable_fractional_pwm = true;
1514
1515         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1516                 init_data.flags.edp_no_power_sequencing = true;
1517
1518         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1519                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1520         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1521                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1522
1523         init_data.flags.seamless_boot_edp_requested = false;
1524
1525         if (check_seamless_boot_capability(adev)) {
1526                 init_data.flags.seamless_boot_edp_requested = true;
1527                 init_data.flags.allow_seamless_boot_optimization = true;
1528                 DRM_INFO("Seamless boot condition check passed\n");
1529         }
1530
1531         init_data.flags.enable_mipi_converter_optimization = true;
1532
1533         INIT_LIST_HEAD(&adev->dm.da_list);
1534         /* Display Core create. */
1535         adev->dm.dc = dc_create(&init_data);
1536
1537         if (adev->dm.dc) {
1538                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1539         } else {
1540                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1541                 goto error;
1542         }
1543
1544         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1545                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1546                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1547         }
1548
1549         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1550                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1551         if (dm_should_disable_stutter(adev->pdev))
1552                 adev->dm.dc->debug.disable_stutter = true;
1553
1554         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1555                 adev->dm.dc->debug.disable_stutter = true;
1556
1557         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1558                 adev->dm.dc->debug.disable_dsc = true;
1559                 adev->dm.dc->debug.disable_dsc_edp = true;
1560         }
1561
1562         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1563                 adev->dm.dc->debug.disable_clock_gate = true;
1564
1565         r = dm_dmub_hw_init(adev);
1566         if (r) {
1567                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1568                 goto error;
1569         }
1570
1571         dc_hardware_init(adev->dm.dc);
1572
1573         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1574         if (!adev->dm.hpd_rx_offload_wq) {
1575                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1576                 goto error;
1577         }
1578
1579         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1580                 struct dc_phy_addr_space_config pa_config;
1581
1582                 mmhub_read_system_context(adev, &pa_config);
1583
1584                 // Call the DC init_memory func
1585                 dc_setup_system_context(adev->dm.dc, &pa_config);
1586         }
1587
1588         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1589         if (!adev->dm.freesync_module) {
1590                 DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1591         } else {
1592                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1593                                  adev->dm.freesync_module);
1594         }
1595
1596         amdgpu_dm_init_color_mod();
1597
1598         if (adev->dm.dc->caps.max_links > 0) {
1599                 adev->dm.vblank_control_workqueue =
1600                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1601                 if (!adev->dm.vblank_control_workqueue)
1602                         DRM_ERROR("amdgpu: failed to initialize vblank_control_workqueue.\n");
1603         }
1604
1605 #ifdef CONFIG_DRM_AMD_DC_HDCP
1606         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1607                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1608
1609                 if (!adev->dm.hdcp_workqueue)
1610                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1611                 else
1612                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1613
1614                 dc_init_callbacks(adev->dm.dc, &init_params);
1615         }
1616 #endif
1617 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1618         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1619 #endif
1620         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1621                 init_completion(&adev->dm.dmub_aux_transfer_done);
1622                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1623                 if (!adev->dm.dmub_notify) {
1624                         DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1625                         goto error;
1626                 }
1627
1628                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1629                 if (!adev->dm.delayed_hpd_wq) {
1630                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1631                         goto error;
1632                 }
1633
1634                 amdgpu_dm_outbox_init(adev);
1635                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1636                         dmub_aux_setconfig_callback, false)) {
1637                         DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1638                         goto error;
1639                 }
1640                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1641                         DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1642                         goto error;
1643                 }
1644                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1645                         DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
1646                         goto error;
1647                 }
1648         }
1649
1650         if (amdgpu_dm_initialize_drm_device(adev)) {
1651                 DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
1653                 goto error;
1654         }
1655
1656         /* create fake encoders for MST */
1657         dm_dp_create_fake_mst_encoders(adev);
1658
1659         /* TODO: Add_display_info? */
1660
1661         /* TODO use dynamic cursor width */
1662         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1663         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1664
1665         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1666                 DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
1668                 goto error;
1669         }
1670
1671
1672         DRM_DEBUG_DRIVER("KMS initialized.\n");
1673
1674         return 0;
1675 error:
1676         amdgpu_dm_fini(adev);
1677
1678         return -EINVAL;
1679 }
1680
1681 static int amdgpu_dm_early_fini(void *handle)
1682 {
1683         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1684
1685         amdgpu_dm_audio_fini(adev);
1686
1687         return 0;
1688 }
1689
1690 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1691 {
1692         int i;
1693
1694         if (adev->dm.vblank_control_workqueue) {
1695                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1696                 adev->dm.vblank_control_workqueue = NULL;
1697         }
1698
1699         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1700                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1701         }
1702
1703         amdgpu_dm_destroy_drm_device(&adev->dm);
1704
1705 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1706         if (adev->dm.crc_rd_wrk) {
1707                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1708                 kfree(adev->dm.crc_rd_wrk);
1709                 adev->dm.crc_rd_wrk = NULL;
1710         }
1711 #endif
1712 #ifdef CONFIG_DRM_AMD_DC_HDCP
1713         if (adev->dm.hdcp_workqueue) {
1714                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1715                 adev->dm.hdcp_workqueue = NULL;
1716         }
1717
1718         if (adev->dm.dc)
1719                 dc_deinit_callbacks(adev->dm.dc);
1720 #endif
1721
1722         dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1723
1724         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1725                 kfree(adev->dm.dmub_notify);
1726                 adev->dm.dmub_notify = NULL;
1727                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1728                 adev->dm.delayed_hpd_wq = NULL;
1729         }
1730
1731         if (adev->dm.dmub_bo)
1732                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1733                                       &adev->dm.dmub_bo_gpu_addr,
1734                                       &adev->dm.dmub_bo_cpu_addr);
1735
1736         if (adev->dm.hpd_rx_offload_wq) {
1737                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1738                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1739                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1740                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1741                         }
1742                 }
1743
1744                 kfree(adev->dm.hpd_rx_offload_wq);
1745                 adev->dm.hpd_rx_offload_wq = NULL;
1746         }
1747
1748         /* DC Destroy TODO: Replace destroy DAL */
1749         if (adev->dm.dc)
1750                 dc_destroy(&adev->dm.dc);
1751         /*
1752          * TODO: pageflip, vlank interrupt
1753          *
1754          * amdgpu_dm_irq_fini(adev);
1755          */
1756
1757         if (adev->dm.cgs_device) {
1758                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1759                 adev->dm.cgs_device = NULL;
1760         }
1761         if (adev->dm.freesync_module) {
1762                 mod_freesync_destroy(adev->dm.freesync_module);
1763                 adev->dm.freesync_module = NULL;
1764         }
1765
1766         mutex_destroy(&adev->dm.audio_lock);
1767         mutex_destroy(&adev->dm.dc_lock);
1770 }
1771
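/*
 * Request and validate the DMCU firmware for ASICs that need it and register
 * it for PSP loading; ASICs without a DMCU (or that use DMCUB) return 0.
 */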
1772 static int load_dmcu_fw(struct amdgpu_device *adev)
1773 {
1774         const char *fw_name_dmcu = NULL;
1775         int r;
1776         const struct dmcu_firmware_header_v1_0 *hdr;
1777
1778         switch (adev->asic_type) {
1779 #if defined(CONFIG_DRM_AMD_DC_SI)
1780         case CHIP_TAHITI:
1781         case CHIP_PITCAIRN:
1782         case CHIP_VERDE:
1783         case CHIP_OLAND:
1784 #endif
1785         case CHIP_BONAIRE:
1786         case CHIP_HAWAII:
1787         case CHIP_KAVERI:
1788         case CHIP_KABINI:
1789         case CHIP_MULLINS:
1790         case CHIP_TONGA:
1791         case CHIP_FIJI:
1792         case CHIP_CARRIZO:
1793         case CHIP_STONEY:
1794         case CHIP_POLARIS11:
1795         case CHIP_POLARIS10:
1796         case CHIP_POLARIS12:
1797         case CHIP_VEGAM:
1798         case CHIP_VEGA10:
1799         case CHIP_VEGA12:
1800         case CHIP_VEGA20:
1801                 return 0;
1802         case CHIP_NAVI12:
1803                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1804                 break;
1805         case CHIP_RAVEN:
1806                 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1807                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1808                 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1809                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1810                 else
1811                         return 0;
1812                 break;
1813         default:
1814                 switch (adev->ip_versions[DCE_HWIP][0]) {
1815                 case IP_VERSION(2, 0, 2):
1816                 case IP_VERSION(2, 0, 3):
1817                 case IP_VERSION(2, 0, 0):
1818                 case IP_VERSION(2, 1, 0):
1819                 case IP_VERSION(3, 0, 0):
1820                 case IP_VERSION(3, 0, 2):
1821                 case IP_VERSION(3, 0, 3):
1822                 case IP_VERSION(3, 0, 1):
1823                 case IP_VERSION(3, 1, 2):
1824                 case IP_VERSION(3, 1, 3):
1825                 case IP_VERSION(3, 1, 5):
1826                 case IP_VERSION(3, 1, 6):
1827                 case IP_VERSION(3, 2, 0):
1828                 case IP_VERSION(3, 2, 1):
1829                         return 0;
1830                 default:
1831                         break;
1832                 }
1833                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1834                 return -EINVAL;
1835         }
1836
1837         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1838                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1839                 return 0;
1840         }
1841
1842         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1843         if (r == -ENOENT) {
1844                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1845                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1846                 adev->dm.fw_dmcu = NULL;
1847                 return 0;
1848         }
1849         if (r) {
1850                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1851                         fw_name_dmcu);
1852                 return r;
1853         }
1854
1855         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1856         if (r) {
1857                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1858                         fw_name_dmcu);
1859                 release_firmware(adev->dm.fw_dmcu);
1860                 adev->dm.fw_dmcu = NULL;
1861                 return r;
1862         }
1863
1864         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1865         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1866         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1867         adev->firmware.fw_size +=
1868                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1869
1870         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1871         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1872         adev->firmware.fw_size +=
1873                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1874
1875         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1876
1877         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1878
1879         return 0;
1880 }
1881
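/* Register read/write callbacks handed to the DMUB service at creation. */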
1882 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1883 {
1884         struct amdgpu_device *adev = ctx;
1885
1886         return dm_read_reg(adev->dm.dc->ctx, address);
1887 }
1888
1889 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1890                                      uint32_t value)
1891 {
1892         struct amdgpu_device *adev = ctx;
1893
1894         dm_write_reg(adev->dm.dc->ctx, address, value);
1895 }
1896
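/*
 * Select the DMUB firmware for this ASIC, create the DMUB service, and carve
 * its memory regions out of a kernel-mapped VRAM allocation.
 */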
1897 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1898 {
1899         struct dmub_srv_create_params create_params;
1900         struct dmub_srv_region_params region_params;
1901         struct dmub_srv_region_info region_info;
1902         struct dmub_srv_fb_params fb_params;
1903         struct dmub_srv_fb_info *fb_info;
1904         struct dmub_srv *dmub_srv;
1905         const struct dmcub_firmware_header_v1_0 *hdr;
1906         const char *fw_name_dmub;
1907         enum dmub_asic dmub_asic;
1908         enum dmub_status status;
1909         int r;
1910
1911         switch (adev->ip_versions[DCE_HWIP][0]) {
1912         case IP_VERSION(2, 1, 0):
1913                 dmub_asic = DMUB_ASIC_DCN21;
1914                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1915                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1916                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1917                 break;
1918         case IP_VERSION(3, 0, 0):
1919                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1920                         dmub_asic = DMUB_ASIC_DCN30;
1921                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1922                 } else {
1923                         dmub_asic = DMUB_ASIC_DCN30;
1924                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1925                 }
1926                 break;
1927         case IP_VERSION(3, 0, 1):
1928                 dmub_asic = DMUB_ASIC_DCN301;
1929                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1930                 break;
1931         case IP_VERSION(3, 0, 2):
1932                 dmub_asic = DMUB_ASIC_DCN302;
1933                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1934                 break;
1935         case IP_VERSION(3, 0, 3):
1936                 dmub_asic = DMUB_ASIC_DCN303;
1937                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1938                 break;
1939         case IP_VERSION(3, 1, 2):
1940         case IP_VERSION(3, 1, 3):
1941                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1942                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1943                 break;
1944         case IP_VERSION(3, 1, 5):
1945                 dmub_asic = DMUB_ASIC_DCN315;
1946                 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1947                 break;
1948         case IP_VERSION(3, 1, 6):
1949                 dmub_asic = DMUB_ASIC_DCN316;
1950                 fw_name_dmub = FIRMWARE_DCN316_DMUB;
1951                 break;
1952         case IP_VERSION(3, 2, 0):
1953                 dmub_asic = DMUB_ASIC_DCN32;
1954                 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
1955                 break;
1956         case IP_VERSION(3, 2, 1):
1957                 dmub_asic = DMUB_ASIC_DCN321;
1958                 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
1959                 break;
1960         default:
1961                 /* ASIC doesn't support DMUB. */
1962                 return 0;
1963         }
1964
1965         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1966         if (r) {
1967                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1968                 return 0;
1969         }
1970
1971         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1972         if (r) {
1973                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1974                 return 0;
1975         }
1976
1977         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1978         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1979
1980         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1981                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1982                         AMDGPU_UCODE_ID_DMCUB;
1983                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1984                         adev->dm.dmub_fw;
1985                 adev->firmware.fw_size +=
1986                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1987
1988                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1989                          adev->dm.dmcub_fw_version);
1990         }
1991
1993         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1994         dmub_srv = adev->dm.dmub_srv;
1995
1996         if (!dmub_srv) {
1997                 DRM_ERROR("Failed to allocate DMUB service!\n");
1998                 return -ENOMEM;
1999         }
2000
2001         memset(&create_params, 0, sizeof(create_params));
2002         create_params.user_ctx = adev;
2003         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2004         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2005         create_params.asic = dmub_asic;
2006
2007         /* Create the DMUB service. */
2008         status = dmub_srv_create(dmub_srv, &create_params);
2009         if (status != DMUB_STATUS_OK) {
2010                 DRM_ERROR("Error creating DMUB service: %d\n", status);
2011                 return -EINVAL;
2012         }
2013
2014         /* Calculate the size of all the regions for the DMUB service. */
2015         memset(&region_params, 0, sizeof(region_params));
2016
2017         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2018                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2019         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2020         region_params.vbios_size = adev->bios_size;
2021         region_params.fw_bss_data = region_params.bss_data_size ?
2022                 adev->dm.dmub_fw->data +
2023                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2024                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2025         region_params.fw_inst_const =
2026                 adev->dm.dmub_fw->data +
2027                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2028                 PSP_HEADER_BYTES;
2029
2030         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2031                                            &region_info);
2032
2033         if (status != DMUB_STATUS_OK) {
2034                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2035                 return -EINVAL;
2036         }
2037
2038         /*
2039          * Allocate a framebuffer based on the total size of all the regions.
2040          * TODO: Move this into GART.
2041          */
2042         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2043                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2044                                     &adev->dm.dmub_bo_gpu_addr,
2045                                     &adev->dm.dmub_bo_cpu_addr);
2046         if (r)
2047                 return r;
2048
2049         /* Rebase the regions on the framebuffer address. */
2050         memset(&fb_params, 0, sizeof(fb_params));
2051         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2052         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2053         fb_params.region_info = &region_info;
2054
2055         adev->dm.dmub_fb_info =
2056                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2057         fb_info = adev->dm.dmub_fb_info;
2058
2059         if (!fb_info) {
2060                 DRM_ERROR(
2061                         "Failed to allocate framebuffer info for DMUB service!\n");
2062                 return -ENOMEM;
2063         }
2064
2065         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2066         if (status != DMUB_STATUS_OK) {
2067                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2068                 return -EINVAL;
2069         }
2070
2071         return 0;
2072 }
2073
2074 static int dm_sw_init(void *handle)
2075 {
2076         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2077         int r;
2078
2079         r = dm_dmub_sw_init(adev);
2080         if (r)
2081                 return r;
2082
2083         return load_dmcu_fw(adev);
2084 }
2085
2086 static int dm_sw_fini(void *handle)
2087 {
2088         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2089
2090         kfree(adev->dm.dmub_fb_info);
2091         adev->dm.dmub_fb_info = NULL;
2092
2093         if (adev->dm.dmub_srv) {
2094                 dmub_srv_destroy(adev->dm.dmub_srv);
2095                 adev->dm.dmub_srv = NULL;
2096         }
2097
2098         release_firmware(adev->dm.dmub_fw);
2099         adev->dm.dmub_fw = NULL;
2100
2101         release_firmware(adev->dm.fw_dmcu);
2102         adev->dm.fw_dmcu = NULL;
2103
2104         return 0;
2105 }
2106
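/*
 * Kick off topology management on every MST branch connector; on failure the
 * link is downgraded to a single (SST) connection.
 */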
2107 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2108 {
2109         struct amdgpu_dm_connector *aconnector;
2110         struct drm_connector *connector;
2111         struct drm_connector_list_iter iter;
2112         int ret = 0;
2113
2114         drm_connector_list_iter_begin(dev, &iter);
2115         drm_for_each_connector_iter(connector, &iter) {
2116                 aconnector = to_amdgpu_dm_connector(connector);
2117                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2118                     aconnector->mst_mgr.aux) {
2119                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2120                                          aconnector,
2121                                          aconnector->base.base.id);
2122
2123                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2124                         if (ret < 0) {
2125                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2126                                 aconnector->dc_link->type =
2127                                         dc_connection_single;
2128                                 break;
2129                         }
2130                 }
2131         }
2132         drm_connector_list_iter_end(&iter);
2133
2134         return ret;
2135 }
2136
2137 static int dm_late_init(void *handle)
2138 {
2139         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2140
2141         struct dmcu_iram_parameters params;
2142         unsigned int linear_lut[16];
2143         int i;
2144         struct dmcu *dmcu = NULL;
2145
2146         dmcu = adev->dm.dc->res_pool->dmcu;
2147
2148         for (i = 0; i < 16; i++)
2149                 linear_lut[i] = 0xFFFF * i / 15;
2150
2151         params.set = 0;
2152         params.backlight_ramping_override = false;
2153         params.backlight_ramping_start = 0xCCCC;
2154         params.backlight_ramping_reduction = 0xCCCCCCCC;
2155         params.backlight_lut_array_size = 16;
2156         params.backlight_lut_array = linear_lut;
2157
2158         /* Minimum backlight level after ABM reduction; don't allow below 1%:
2159          * 0xFFFF * 0.01 = 0x28F
2160          */
2161         params.min_abm_backlight = 0x28F;
2162         /*
2163          * In the case where ABM is implemented on DMCUB, the dmcu object
2164          * will be NULL. ABM 2.4 and up are implemented on DMCUB.
2165          */
2166         if (dmcu) {
2167                 if (!dmcu_load_iram(dmcu, params))
2168                         return -EINVAL;
2169         } else if (adev->dm.dc->ctx->dmub_srv) {
2170                 struct dc_link *edp_links[MAX_NUM_EDP];
2171                 int edp_num;
2172
2173                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2174                 for (i = 0; i < edp_num; i++) {
2175                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2176                                 return -EINVAL;
2177                 }
2178         }
2179
2180         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2181 }
2182
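/*
 * Suspend or resume the MST topology manager of every root MST connector.
 * If a topology fails to resume, its manager is stopped and a hotplug event
 * is sent so userspace can re-probe.
 */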
2183 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2184 {
2185         struct amdgpu_dm_connector *aconnector;
2186         struct drm_connector *connector;
2187         struct drm_connector_list_iter iter;
2188         struct drm_dp_mst_topology_mgr *mgr;
2189         int ret;
2190         bool need_hotplug = false;
2191
2192         drm_connector_list_iter_begin(dev, &iter);
2193         drm_for_each_connector_iter(connector, &iter) {
2194                 aconnector = to_amdgpu_dm_connector(connector);
2195                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2196                     aconnector->mst_port)
2197                         continue;
2198
2199                 mgr = &aconnector->mst_mgr;
2200
2201                 if (suspend) {
2202                         drm_dp_mst_topology_mgr_suspend(mgr);
2203                 } else {
2204                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2205                         if (ret < 0) {
2206                                 dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2207                                         aconnector->dc_link);
2208                                 need_hotplug = true;
2209                         }
2210                 }
2211         }
2212         drm_connector_list_iter_end(&iter);
2213
2214         if (need_hotplug)
2215                 drm_kms_helper_hotplug_event(dev);
2216 }
2217
2218 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2219 {
2220         int ret = 0;
2221
2222         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2223          * depends on the Windows driver's dc implementation.
2224          * For Navi1x, the clock settings of the dcn watermarks are fixed and
2225          * should be passed to smu during boot up and on resume from s3.
2226          * Boot up: dc calculates the dcn watermark clock settings within
2227          * dc_create, in dcn20_resource_construct, and then calls the pplib
2228          * functions below to pass the settings to smu:
2229          * smu_set_watermarks_for_clock_ranges
2230          * smu_set_watermarks_table
2231          * navi10_set_watermarks_table
2232          * smu_write_watermarks_table
2233          *
2234          * For Renoir, the clock settings of the dcn watermarks are also
2235          * fixed values. dc has implemented a different flow for the Windows
2236          * driver:
2237          * dc_hardware_init / dc_set_power_state
2238          * dcn10_init_hw
2239          * notify_wm_ranges
2240          * set_wm_ranges
2241          * -- Linux
2242          * smu_set_watermarks_for_clock_ranges
2243          * renoir_set_watermarks_table
2244          * smu_write_watermarks_table
2245          *
2246          * For Linux,
2247          * dc_hardware_init -> amdgpu_dm_init
2248          * dc_set_power_state --> dm_resume
2249          *
2250          * Therefore, this function applies to navi10/12/14 but not to Renoir.
2251          */
2252         switch (adev->ip_versions[DCE_HWIP][0]) {
2253         case IP_VERSION(2, 0, 2):
2254         case IP_VERSION(2, 0, 0):
2255                 break;
2256         default:
2257                 return 0;
2258         }
2259
2260         ret = amdgpu_dpm_write_watermarks_table(adev);
2261         if (ret) {
2262                 DRM_ERROR("Failed to update WMTABLE!\n");
2263                 return ret;
2264         }
2265
2266         return 0;
2267 }
2268
2269 /**
2270  * dm_hw_init() - Initialize DC device
2271  * @handle: The base driver device containing the amdgpu_dm device.
2272  *
2273  * Initialize the &struct amdgpu_display_manager device. This involves calling
2274  * the initializers of each DM component, then populating the struct with them.
2275  *
2276  * Although the function implies hardware initialization, both hardware and
2277  * software are initialized here. Splitting them out to their relevant init
2278  * hooks is a future TODO item.
2279  *
2280  * Some notable things that are initialized here:
2281  *
2282  * - Display Core, both software and hardware
2283  * - DC modules that we need (freesync and color management)
2284  * - DRM software states
2285  * - Interrupt sources and handlers
2286  * - Vblank support
2287  * - Debug FS entries, if enabled
2288  */
2289 static int dm_hw_init(void *handle)
2290 {
2291         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2292         /* Create DAL display manager */
2293         amdgpu_dm_init(adev);
2294         amdgpu_dm_hpd_init(adev);
2295
2296         return 0;
2297 }
2298
2299 /**
2300  * dm_hw_fini() - Teardown DC device
2301  * @handle: The base driver device containing the amdgpu_dm device.
2302  *
2303  * Teardown components within &struct amdgpu_display_manager that require
2304  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2305  * were loaded. Also flush IRQ workqueues and disable them.
2306  */
2307 static int dm_hw_fini(void *handle)
2308 {
2309         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2310
2311         amdgpu_dm_hpd_fini(adev);
2312
2313         amdgpu_dm_irq_fini(adev);
2314         amdgpu_dm_fini(adev);
2315         return 0;
2316 }
2317
2318
2319 static int dm_enable_vblank(struct drm_crtc *crtc);
2320 static void dm_disable_vblank(struct drm_crtc *crtc);
2321
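/*
 * Enable or disable the pageflip and vblank interrupts of every active CRTC
 * in @state; used to quiesce and later restore IRQs across a GPU reset.
 */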
2322 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2323                                  struct dc_state *state, bool enable)
2324 {
2325         enum dc_irq_source irq_source;
2326         struct amdgpu_crtc *acrtc;
2327         int rc = -EBUSY;
2328         int i = 0;
2329
2330         for (i = 0; i < state->stream_count; i++) {
2331                 acrtc = get_crtc_by_otg_inst(
2332                                 adev, state->stream_status[i].primary_otg_inst);
2333
2334                 if (acrtc && state->stream_status[i].plane_count != 0) {
2335                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2336                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2337                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2338                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
2339                         if (rc)
2340                                 DRM_WARN("Failed to %s pflip interrupts\n",
2341                                          enable ? "enable" : "disable");
2342
2343                         if (enable) {
2344                                 rc = dm_enable_vblank(&acrtc->base);
2345                                 if (rc)
2346                                         DRM_WARN("Failed to enable vblank interrupts\n");
2347                         } else {
2348                                 dm_disable_vblank(&acrtc->base);
2349                         }
2351                 }
2352         }
2354 }
2355
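/*
 * Commit a copy of the current DC state with all planes and streams removed,
 * effectively blanking every display; used by the GPU-reset suspend path.
 */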
2356 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2357 {
2358         struct dc_state *context = NULL;
2359         enum dc_status res = DC_ERROR_UNEXPECTED;
2360         int i;
2361         struct dc_stream_state *del_streams[MAX_PIPES];
2362         int del_streams_count = 0;
2363
2364         memset(del_streams, 0, sizeof(del_streams));
2365
2366         context = dc_create_state(dc);
2367         if (context == NULL)
2368                 goto context_alloc_fail;
2369
2370         dc_resource_state_copy_construct_current(dc, context);
2371
2372         /* First remove from context all streams */
2373         for (i = 0; i < context->stream_count; i++) {
2374                 struct dc_stream_state *stream = context->streams[i];
2375
2376                 del_streams[del_streams_count++] = stream;
2377         }
2378
2379         /* Remove all planes for removed streams and then remove the streams */
2380         for (i = 0; i < del_streams_count; i++) {
2381                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2382                         res = DC_FAIL_DETACH_SURFACES;
2383                         goto fail;
2384                 }
2385
2386                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2387                 if (res != DC_OK)
2388                         goto fail;
2389         }
2390
2391         res = dc_commit_state(dc, context);
2392
2393 fail:
2394         dc_release_state(context);
2395
2396 context_alloc_fail:
2397         return res;
2398 }
2399
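/* Flush any pending HPD RX offload work on every link before suspending. */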
2400 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2401 {
2402         int i;
2403
2404         if (dm->hpd_rx_offload_wq) {
2405                 for (i = 0; i < dm->dc->caps.max_links; i++)
2406                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2407         }
2408 }
2409
2410 static int dm_suspend(void *handle)
2411 {
2412         struct amdgpu_device *adev = handle;
2413         struct amdgpu_display_manager *dm = &adev->dm;
2414         int ret = 0;
2415
2416         if (amdgpu_in_reset(adev)) {
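                /*
                 * Note: dc_lock is taken here and intentionally left held
                 * across the whole GPU reset; the matching unlock is in the
                 * reset branch of dm_resume().
                 */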
2417                 mutex_lock(&dm->dc_lock);
2418
2419                 dc_allow_idle_optimizations(adev->dm.dc, false);
2420
2421                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2422
2423                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2424
2425                 amdgpu_dm_commit_zero_streams(dm->dc);
2426
2427                 amdgpu_dm_irq_suspend(adev);
2428
2429                 hpd_rx_irq_work_suspend(dm);
2430
2431                 return ret;
2432         }
2433
2434         WARN_ON(adev->dm.cached_state);
2435         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2436
2437         s3_handle_mst(adev_to_drm(adev), true);
2438
2439         amdgpu_dm_irq_suspend(adev);
2440
2441         hpd_rx_irq_work_suspend(dm);
2442
2443         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2444
2445         return 0;
2446 }
2447
2448 struct amdgpu_dm_connector *
2449 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2450                                              struct drm_crtc *crtc)
2451 {
2452         uint32_t i;
2453         struct drm_connector_state *new_con_state;
2454         struct drm_connector *connector;
2455         struct drm_crtc *crtc_from_state;
2456
2457         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2458                 crtc_from_state = new_con_state->crtc;
2459
2460                 if (crtc_from_state == crtc)
2461                         return to_amdgpu_dm_connector(connector);
2462         }
2463
2464         return NULL;
2465 }
2466
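/*
 * Fabricate a local sink for a connector that is forced on but where no
 * physical sink was detected, so modes can still be set on the emulated link.
 */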
2467 static void emulated_link_detect(struct dc_link *link)
2468 {
2469         struct dc_sink_init_data sink_init_data = { 0 };
2470         struct display_sink_capability sink_caps = { 0 };
2471         enum dc_edid_status edid_status;
2472         struct dc_context *dc_ctx = link->ctx;
2473         struct dc_sink *sink = NULL;
2474         struct dc_sink *prev_sink = NULL;
2475
2476         link->type = dc_connection_none;
2477         prev_sink = link->local_sink;
2478
2479         if (prev_sink)
2480                 dc_sink_release(prev_sink);
2481
2482         switch (link->connector_signal) {
2483         case SIGNAL_TYPE_HDMI_TYPE_A: {
2484                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2485                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2486                 break;
2487         }
2488
2489         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2490                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2491                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2492                 break;
2493         }
2494
2495         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2496                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2497                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2498                 break;
2499         }
2500
2501         case SIGNAL_TYPE_LVDS: {
2502                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2503                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2504                 break;
2505         }
2506
2507         case SIGNAL_TYPE_EDP: {
2508                 sink_caps.transaction_type =
2509                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2510                 sink_caps.signal = SIGNAL_TYPE_EDP;
2511                 break;
2512         }
2513
2514         case SIGNAL_TYPE_DISPLAY_PORT: {
2515                 sink_caps.transaction_type =
2516                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2517                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2518                 break;
2519         }
2520
2521         default:
2522                 DC_ERROR("Invalid connector type! signal:%d\n",
2523                         link->connector_signal);
2524                 return;
2525         }
2526
2527         sink_init_data.link = link;
2528         sink_init_data.sink_signal = sink_caps.signal;
2529
2530         sink = dc_sink_create(&sink_init_data);
2531         if (!sink) {
2532                 DC_ERROR("Failed to create sink!\n");
2533                 return;
2534         }
2535
2536         /* dc_sink_create returns a new reference */
2537         link->local_sink = sink;
2538
2539         edid_status = dm_helpers_read_local_edid(
2540                         link->ctx,
2541                         link,
2542                         sink);
2543
2544         if (edid_status != EDID_OK)
2545                 DC_ERROR("Failed to read EDID\n");
2547 }
2548
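/*
 * After a GPU reset, re-commit every stream of the cached DC state with all
 * of its surfaces flagged for a full update.
 */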
2549 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2550                                      struct amdgpu_display_manager *dm)
2551 {
2552         struct {
2553                 struct dc_surface_update surface_updates[MAX_SURFACES];
2554                 struct dc_plane_info plane_infos[MAX_SURFACES];
2555                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2556                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2557                 struct dc_stream_update stream_update;
2558         } *bundle;
2559         int k, m;
2560
2561         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2562
2563         if (!bundle) {
2564                 dm_error("Failed to allocate update bundle\n");
2565                 goto cleanup;
2566         }
2567
2568         for (k = 0; k < dc_state->stream_count; k++) {
2569                 bundle->stream_update.stream = dc_state->streams[k];
2570
2571                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2572                         bundle->surface_updates[m].surface =
2573                                 dc_state->stream_status[k].plane_states[m];
2574                         bundle->surface_updates[m].surface->force_full_update =
2575                                 true;
2576                 }
2577                 dc_commit_updates_for_stream(
2578                         dm->dc, bundle->surface_updates,
2579                         dc_state->stream_status[k].plane_count,
2580                         dc_state->streams[k], &bundle->stream_update, dc_state);
2581         }
2582
2583 cleanup:
2584         kfree(bundle);
2587 }
2588
2589 static int dm_resume(void *handle)
2590 {
2591         struct amdgpu_device *adev = handle;
2592         struct drm_device *ddev = adev_to_drm(adev);
2593         struct amdgpu_display_manager *dm = &adev->dm;
2594         struct amdgpu_dm_connector *aconnector;
2595         struct drm_connector *connector;
2596         struct drm_connector_list_iter iter;
2597         struct drm_crtc *crtc;
2598         struct drm_crtc_state *new_crtc_state;
2599         struct dm_crtc_state *dm_new_crtc_state;
2600         struct drm_plane *plane;
2601         struct drm_plane_state *new_plane_state;
2602         struct dm_plane_state *dm_new_plane_state;
2603         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2604         enum dc_connection_type new_connection_type = dc_connection_none;
2605         struct dc_state *dc_state;
2606         int i, r, j;
2607
2608         if (amdgpu_in_reset(adev)) {
2609                 dc_state = dm->cached_dc_state;
2610
2611                 /*
2612                  * The dc->current_state is backed up into dm->cached_dc_state
2613                  * before we commit 0 streams.
2614                  *
2615                  * DC will clear link encoder assignments on the real state
2616                  * but the changes won't propagate over to the copy we made
2617                  * before the 0 streams commit.
2618                  *
2619                  * DC expects that link encoder assignments are *not* valid
2620                  * when committing a state, so as a workaround we can copy
2621                  * off of the current state.
2622                  *
2623                  * We lose the previous assignments, but we had already
2624                  * committed 0 streams anyway.
2625                  */
2626                 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2627
2628                 if (dc_enable_dmub_notifications(adev->dm.dc))
2629                         amdgpu_dm_outbox_init(adev);
2630
2631                 r = dm_dmub_hw_init(adev);
2632                 if (r)
2633                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2634
2635                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2636                 dc_resume(dm->dc);
2637
2638                 amdgpu_dm_irq_resume_early(adev);
2639
2640                 for (i = 0; i < dc_state->stream_count; i++) {
2641                         dc_state->streams[i]->mode_changed = true;
2642                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2643                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2644                                         = 0xffffffff;
2645                         }
2646                 }
2647
2648                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2649
2650                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2651
2652                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2653
2654                 dc_release_state(dm->cached_dc_state);
2655                 dm->cached_dc_state = NULL;
2656
2657                 amdgpu_dm_irq_resume_late(adev);
2658
2659                 mutex_unlock(&dm->dc_lock);
2660
2661                 return 0;
2662         }
2663         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2664         dc_release_state(dm_state->context);
2665         dm_state->context = dc_create_state(dm->dc);
2666         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2667         dc_resource_state_construct(dm->dc, dm_state->context);
2668
2669         /* Re-enable outbox interrupts for DPIA. */
2670         if (dc_enable_dmub_notifications(adev->dm.dc))
2671                 amdgpu_dm_outbox_init(adev);
2672
2673         /* Before powering on DC we need to re-initialize DMUB. */
2674         dm_dmub_hw_resume(adev);
2675
2676         /* power on hardware */
2677         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2678
2679         /* program HPD filter */
2680         dc_resume(dm->dc);
2681
2682         /*
2683          * early enable HPD Rx IRQ, should be done before set mode as short
2684          * pulse interrupts are used for MST
2685          */
2686         amdgpu_dm_irq_resume_early(adev);
2687
2688         /* On resume we need to rewrite the MSTM control bits to enable MST */
2689         s3_handle_mst(ddev, false);
2690
2691         /* Do detection */
2692         drm_connector_list_iter_begin(ddev, &iter);
2693         drm_for_each_connector_iter(connector, &iter) {
2694                 aconnector = to_amdgpu_dm_connector(connector);
2695
2696                 /*
2697                  * Skip MST connectors that were already created while
2698                  * traversing the topology; detection is handled per branch.
2699                  */
2700                 if (aconnector->dc_link &&
2701                     aconnector->dc_link->type == dc_connection_mst_branch)
2702                         continue;
2703
2704                 mutex_lock(&aconnector->hpd_lock);
2705                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2706                         DRM_ERROR("KMS: Failed to detect connector\n");
2707
2708                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2709                         emulated_link_detect(aconnector->dc_link);
2710                 else
2711                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2712
2713                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2714                         aconnector->fake_enable = false;
2715
2716                 if (aconnector->dc_sink)
2717                         dc_sink_release(aconnector->dc_sink);
2718                 aconnector->dc_sink = NULL;
2719                 amdgpu_dm_update_connector_after_detect(aconnector);
2720                 mutex_unlock(&aconnector->hpd_lock);
2721         }
2722         drm_connector_list_iter_end(&iter);
2723
2724         /* Force mode set in atomic commit */
2725         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2726                 new_crtc_state->active_changed = true;
2727
2728         /*
2729          * atomic_check is expected to create the dc states. We need to release
2730          * them here, since they were duplicated as part of the suspend
2731          * procedure.
2732          */
2733         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2734                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2735                 if (dm_new_crtc_state->stream) {
2736                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2737                         dc_stream_release(dm_new_crtc_state->stream);
2738                         dm_new_crtc_state->stream = NULL;
2739                 }
2740         }
2741
2742         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2743                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2744                 if (dm_new_plane_state->dc_state) {
2745                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2746                         dc_plane_state_release(dm_new_plane_state->dc_state);
2747                         dm_new_plane_state->dc_state = NULL;
2748                 }
2749         }
2750
2751         drm_atomic_helper_resume(ddev, dm->cached_state);
2752
2753         dm->cached_state = NULL;
2754
2755         amdgpu_dm_irq_resume_late(adev);
2756
2757         amdgpu_dm_smu_write_watermarks_table(adev);
2758
2759         return 0;
2760 }
2761
2762 /**
2763  * DOC: DM Lifecycle
2764  *
2765  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2766  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2767  * the base driver's device list to be initialized and torn down accordingly.
2768  *
2769  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2770  */
2771
2772 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2773         .name = "dm",
2774         .early_init = dm_early_init,
2775         .late_init = dm_late_init,
2776         .sw_init = dm_sw_init,
2777         .sw_fini = dm_sw_fini,
2778         .early_fini = amdgpu_dm_early_fini,
2779         .hw_init = dm_hw_init,
2780         .hw_fini = dm_hw_fini,
2781         .suspend = dm_suspend,
2782         .resume = dm_resume,
2783         .is_idle = dm_is_idle,
2784         .wait_for_idle = dm_wait_for_idle,
2785         .check_soft_reset = dm_check_soft_reset,
2786         .soft_reset = dm_soft_reset,
2787         .set_clockgating_state = dm_set_clockgating_state,
2788         .set_powergating_state = dm_set_powergating_state,
2789 };
2790
2791 const struct amdgpu_ip_block_version dm_ip_block =
2792 {
2793         .type = AMD_IP_BLOCK_TYPE_DCE,
2794         .major = 1,
2795         .minor = 0,
2796         .rev = 0,
2797         .funcs = &amdgpu_dm_funcs,
2798 };
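
/*
 * For illustration only: a minimal sketch of how the base driver is expected
 * to consume &dm_ip_block, assuming the standard amdgpu_device_ip_block_add()
 * helper (the actual call site lives in the base driver, not in this file):
 *
 *	if (amdgpu_device_has_dc_support(adev))
 *		amdgpu_device_ip_block_add(adev, &dm_ip_block);
 */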
2799
2800
2801 /**
2802  * DOC: atomic
2803  *
2804  * *WIP*
2805  */
2806
2807 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2808         .fb_create = amdgpu_display_user_framebuffer_create,
2809         .get_format_info = amd_get_format_info,
2810         .output_poll_changed = drm_fb_helper_output_poll_changed,
2811         .atomic_check = amdgpu_dm_atomic_check,
2812         .atomic_commit = drm_atomic_helper_commit,
2813 };
2814
2815 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2816         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2817 };
2818
2819 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2820 {
2821         u32 max_avg, min_cll, max, min, q, r;
2822         struct amdgpu_dm_backlight_caps *caps;
2823         struct amdgpu_display_manager *dm;
2824         struct drm_connector *conn_base;
2825         struct amdgpu_device *adev;
2826         struct dc_link *link = NULL;
2827         static const u8 pre_computed_values[] = {
2828                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2829                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2830         int i;
2831
2832         if (!aconnector || !aconnector->dc_link)
2833                 return;
2834
2835         link = aconnector->dc_link;
2836         if (link->connector_signal != SIGNAL_TYPE_EDP)
2837                 return;
2838
2839         conn_base = &aconnector->base;
2840         adev = drm_to_adev(conn_base->dev);
2841         dm = &adev->dm;
2842         for (i = 0; i < dm->num_of_edps; i++) {
2843                 if (link == dm->backlight_link[i])
2844                         break;
2845         }
2846         if (i >= dm->num_of_edps)
2847                 return;
2848         caps = &dm->backlight_caps[i];
2849         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2850         caps->aux_support = false;
2851         max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2852         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2853
2854         if (caps->ext_caps->bits.oled == 1 /*||
2855             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2856             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2857                 caps->aux_support = true;
2858
2859         if (amdgpu_backlight == 0)
2860                 caps->aux_support = false;
2861         else if (amdgpu_backlight == 1)
2862                 caps->aux_support = true;
2863
2864         /* From the specification (CTA-861-G), for calculating the maximum
2865          * luminance we need to use:
2866          *      Luminance = 50*2**(CV/32)
2867          * Where CV is a one-byte value.
2868          * Evaluating this expression directly would require floating-point
2869          * precision; to avoid that complexity we take advantage of the fact
2870          * that CV is divided by a constant. By Euclid's division algorithm,
2871          * CV can be written as CV = 32*q + r. Substituting into the Luminance
2872          * expression gives 50*(2**q)*(2**(r/32)), so we only need to
2873          * pre-compute the values of 50*2**(r/32). Those values were generated
2874          * with the following Ruby line:
2875          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2876          * The results of the above expression can be verified against
2877          * pre_computed_values.
2878          */
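	/*
	 * Worked example (illustration only): for max_avg = 70,
	 * q = 70 >> 5 = 2 and r = 70 % 32 = 6, so
	 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228,
	 * which matches round(50 * 2**(70/32)) = round(227.8).
	 */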
2879         q = max_avg >> 5;
2880         r = max_avg % 32;
2881         max = (1 << q) * pre_computed_values[r];
2882
2883         // min luminance: maxLum * (CV/255)^2 / 100
2884         q = DIV_ROUND_CLOSEST(min_cll, 255);
2885         min = max * DIV_ROUND_CLOSEST((q * q), 100);
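	/*
	 * Note (observation, not from the spec): with min_cll in 0..255,
	 * q = DIV_ROUND_CLOSEST(min_cll, 255) is either 0 or 1, so
	 * DIV_ROUND_CLOSEST(q * q, 100) rounds to 0 and min evaluates
	 * to 0 with this integer math.
	 */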
2886
2887         caps->aux_max_input_signal = max;
2888         caps->aux_min_input_signal = min;
2889 }
2890
2891 void amdgpu_dm_update_connector_after_detect(
2892                 struct amdgpu_dm_connector *aconnector)
2893 {
2894         struct drm_connector *connector = &aconnector->base;
2895         struct drm_device *dev = connector->dev;
2896         struct dc_sink *sink;
2897
2898         /* MST handled by drm_mst framework */
2899         if (aconnector->mst_mgr.mst_state)
2900                 return;
2901
2902         sink = aconnector->dc_link->local_sink;
2903         if (sink)
2904                 dc_sink_retain(sink);
2905
2906         /*
2907          * An EDID-managed connector gets its first update only in the mode_valid
2908          * hook; the connector sink is then set to either the fake or the physical
2909          * sink, depending on link status. Skip if already done during boot.
2910          */
2911         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2912                         && aconnector->dc_em_sink) {
2913
2914                 /*
2915                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2916                  * fake a stream, because connector->sink is set to NULL on resume.
2917                  */
2918                 mutex_lock(&dev->mode_config.mutex);
2919
2920                 if (sink) {
2921                         if (aconnector->dc_sink) {
2922                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2923                                 /*
2924                                  * The retain and release below bump the sink's
2925                                  * refcount because the link no longer points to it
2926                                  * after disconnect; otherwise the next CRTC-to-connector
2927                                  * reshuffle by UMD would trigger an unwanted
2928                                  * dc_sink release.
2929                                 dc_sink_release(aconnector->dc_sink);
2930                         }
2931                         aconnector->dc_sink = sink;
2932                         dc_sink_retain(aconnector->dc_sink);
2933                         amdgpu_dm_update_freesync_caps(connector,
2934                                         aconnector->edid);
2935                 } else {
2936                         amdgpu_dm_update_freesync_caps(connector, NULL);
2937                         if (!aconnector->dc_sink) {
2938                                 aconnector->dc_sink = aconnector->dc_em_sink;
2939                                 dc_sink_retain(aconnector->dc_sink);
2940                         }
2941                 }
2942
2943                 mutex_unlock(&dev->mode_config.mutex);
2944
2945                 if (sink)
2946                         dc_sink_release(sink);
2947                 return;
2948         }
2949
2950         /*
2951          * TODO: temporary guard to look for proper fix
2952          * if this sink is MST sink, we should not do anything
2953          */
2954         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2955                 dc_sink_release(sink);
2956                 return;
2957         }
2958
2959         if (aconnector->dc_sink == sink) {
2960                 /*
2961                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2962                  * Do nothing!!
2963                  */
2964                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2965                                 aconnector->connector_id);
2966                 if (sink)
2967                         dc_sink_release(sink);
2968                 return;
2969         }
2970
2971         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2972                 aconnector->connector_id, aconnector->dc_sink, sink);
2973
2974         mutex_lock(&dev->mode_config.mutex);
2975
2976         /*
2977          * 1. Update status of the drm connector
2978          * 2. Send an event and let userspace tell us what to do
2979          */
2980         if (sink) {
2981                 /*
2982                  * TODO: check if we still need the S3 mode update workaround.
2983                  * If yes, put it here.
2984                  */
2985                 if (aconnector->dc_sink) {
2986                         amdgpu_dm_update_freesync_caps(connector, NULL);
2987                         dc_sink_release(aconnector->dc_sink);
2988                 }
2989
2990                 aconnector->dc_sink = sink;
2991                 dc_sink_retain(aconnector->dc_sink);
2992                 if (sink->dc_edid.length == 0) {
2993                         aconnector->edid = NULL;
2994                         if (aconnector->dc_link->aux_mode) {
2995                                 drm_dp_cec_unset_edid(
2996                                         &aconnector->dm_dp_aux.aux);
2997                         }
2998                 } else {
2999                         aconnector->edid =
3000                                 (struct edid *)sink->dc_edid.raw_edid;
3001
3002                         if (aconnector->dc_link->aux_mode)
3003                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3004                                                     aconnector->edid);
3005                 }
3006
3007                 drm_connector_update_edid_property(connector, aconnector->edid);
3008                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3009                 update_connector_ext_caps(aconnector);
3010         } else {
3011                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3012                 amdgpu_dm_update_freesync_caps(connector, NULL);
3013                 drm_connector_update_edid_property(connector, NULL);
3014                 aconnector->num_modes = 0;
3015                 dc_sink_release(aconnector->dc_sink);
3016                 aconnector->dc_sink = NULL;
3017                 aconnector->edid = NULL;
3018 #ifdef CONFIG_DRM_AMD_DC_HDCP
3019                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3020                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3021                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3022 #endif
3023         }
3024
3025         mutex_unlock(&dev->mode_config.mutex);
3026
3027         update_subconnector_property(aconnector);
3028
3029         if (sink)
3030                 dc_sink_release(sink);
3031 }
3032
3033 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3034 {
3035         struct drm_connector *connector = &aconnector->base;
3036         struct drm_device *dev = connector->dev;
3037         enum dc_connection_type new_connection_type = dc_connection_none;
3038         struct amdgpu_device *adev = drm_to_adev(dev);
3039 #ifdef CONFIG_DRM_AMD_DC_HDCP
3040         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3041 #endif
3042
3043         if (adev->dm.disable_hpd_irq)
3044                 return;
3045
3046         /*
3047          * On failure, or for MST, there is no need to update the connector status
3048          * or notify the OS, since MST handles this in its own context.
3049          */
3050         mutex_lock(&aconnector->hpd_lock);
3051
3052 #ifdef CONFIG_DRM_AMD_DC_HDCP
3053         if (adev->dm.hdcp_workqueue) {
3054                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3055                 dm_con_state->update_hdcp = true;
3056         }
3057 #endif
3058         if (aconnector->fake_enable)
3059                 aconnector->fake_enable = false;
3060
3061         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3062                 DRM_ERROR("KMS: Failed to detect connector\n");
3063
3064         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3065                 emulated_link_detect(aconnector->dc_link);
3066
3067                 drm_modeset_lock_all(dev);
3068                 dm_restore_drm_connector_state(dev, connector);
3069                 drm_modeset_unlock_all(dev);
3070
3071                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3072                         drm_kms_helper_connector_hotplug_event(connector);
3073
3074         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3075                 amdgpu_dm_update_connector_after_detect(aconnector);
3076
3077                 drm_modeset_lock_all(dev);
3078                 dm_restore_drm_connector_state(dev, connector);
3079                 drm_modeset_unlock_all(dev);
3080
3081                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3082                         drm_kms_helper_connector_hotplug_event(connector);
3083         }
3084         mutex_unlock(&aconnector->hpd_lock);
3085
3086 }
3087
3088 static void handle_hpd_irq(void *param)
3089 {
3090         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3091
3092         handle_hpd_irq_helper(aconnector);
3093
3094 }
3095
3096 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3097 {
3098         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3099         uint8_t dret;
3100         bool new_irq_handled = false;
3101         int dpcd_addr;
3102         int dpcd_bytes_to_read;
3103
3104         const int max_process_count = 30;
3105         int process_count = 0;
3106
3107         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3108
3109         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3110                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3111                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3112                 dpcd_addr = DP_SINK_COUNT;
3113         } else {
3114                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3115                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3116                 dpcd_addr = DP_SINK_COUNT_ESI;
3117         }
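	/*
	 * Illustration, assuming the standard DPCD map (DP_SINK_COUNT at
	 * 0x200, DP_LANE0_1_STATUS at 0x202, DP_SINK_COUNT_ESI at 0x2002,
	 * DP_PSR_ERROR_STATUS at 0x2006): pre-1.2 devices are read with
	 * 2 bytes starting at 0x200, newer ones with 4 ESI bytes at 0x2002.
	 */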
3118
3119         dret = drm_dp_dpcd_read(
3120                 &aconnector->dm_dp_aux.aux,
3121                 dpcd_addr,
3122                 esi,
3123                 dpcd_bytes_to_read);
3124
3125         while (dret == dpcd_bytes_to_read &&
3126                 process_count < max_process_count) {
3127                 uint8_t retry;
3128                 dret = 0;
3129
3130                 process_count++;
3131
3132                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3133                 /* handle HPD short pulse irq */
3134                 if (aconnector->mst_mgr.mst_state)
3135                         drm_dp_mst_hpd_irq(
3136                                 &aconnector->mst_mgr,
3137                                 esi,
3138                                 &new_irq_handled);
3139
3140                 if (new_irq_handled) {
3141                         /* ACK at DPCD to notify downstream */
3142                         const int ack_dpcd_bytes_to_write =
3143                                 dpcd_bytes_to_read - 1;
3144
3145                         for (retry = 0; retry < 3; retry++) {
3146                                 uint8_t wret;
3147
3148                                 wret = drm_dp_dpcd_write(
3149                                         &aconnector->dm_dp_aux.aux,
3150                                         dpcd_addr + 1,
3151                                         &esi[1],
3152                                         ack_dpcd_bytes_to_write);
3153                                 if (wret == ack_dpcd_bytes_to_write)
3154                                         break;
3155                         }
3156
3157                         /* check if there is new irq to be handled */
3158                         dret = drm_dp_dpcd_read(
3159                                 &aconnector->dm_dp_aux.aux,
3160                                 dpcd_addr,
3161                                 esi,
3162                                 dpcd_bytes_to_read);
3163
3164                         new_irq_handled = false;
3165                 } else {
3166                         break;
3167                 }
3168         }
3169
3170         if (process_count == max_process_count)
3171                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3172 }
3173
3174 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3175                                                         union hpd_irq_data hpd_irq_data)
3176 {
3177         struct hpd_rx_irq_offload_work *offload_work =
3178                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3179
3180         if (!offload_work) {
3181                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3182                 return;
3183         }
3184
3185         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3186         offload_work->data = hpd_irq_data;
3187         offload_work->offload_wq = offload_wq;
3188
3189         queue_work(offload_wq->wq, &offload_work->work);
3190         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3191 }
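
/*
 * A sketch (illustration only) of how the offload handler, defined earlier
 * in this file, is assumed to recover the queued item via the usual
 * container_of() pattern:
 *
 *	static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
 *	{
 *		struct hpd_rx_irq_offload_work *offload_work =
 *			container_of(work, struct hpd_rx_irq_offload_work, work);
 *		...
 *	}
 */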
3192
3193 static void handle_hpd_rx_irq(void *param)
3194 {
3195         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3196         struct drm_connector *connector = &aconnector->base;
3197         struct drm_device *dev = connector->dev;
3198         struct dc_link *dc_link = aconnector->dc_link;
3199         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3200         bool result = false;
3201         enum dc_connection_type new_connection_type = dc_connection_none;
3202         struct amdgpu_device *adev = drm_to_adev(dev);
3203         union hpd_irq_data hpd_irq_data;
3204         bool link_loss = false;
3205         bool has_left_work = false;
3206         int idx = aconnector->base.index;
3207         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3208
3209         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3210
3211         if (adev->dm.disable_hpd_irq)
3212                 return;
3213
3214         /*
3215          * TODO: Temporarily add a mutex to protect the HPD interrupt from a
3216          * GPIO conflict; once the i2c helper is implemented, this mutex
3217          * should be retired.
3218          */
3219         mutex_lock(&aconnector->hpd_lock);
3220
3221         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3222                                                 &link_loss, true, &has_left_work);
3223
3224         if (!has_left_work)
3225                 goto out;
3226
3227         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3228                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3229                 goto out;
3230         }
3231
3232         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3233                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3234                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3235                         dm_handle_mst_sideband_msg(aconnector);
3236                         goto out;
3237                 }
3238
3239                 if (link_loss) {
3240                         bool skip = false;
3241
3242                         spin_lock(&offload_wq->offload_lock);
3243                         skip = offload_wq->is_handling_link_loss;
3244
3245                         if (!skip)
3246                                 offload_wq->is_handling_link_loss = true;
3247
3248                         spin_unlock(&offload_wq->offload_lock);
3249
3250                         if (!skip)
3251                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3252
3253                         goto out;
3254                 }
3255         }
3256
3257 out:
3258         if (result && !is_mst_root_connector) {
3259                 /* Downstream Port status changed. */
3260                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3261                         DRM_ERROR("KMS: Failed to detect connector\n");
3262
3263                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3264                         emulated_link_detect(dc_link);
3265
3266                         if (aconnector->fake_enable)
3267                                 aconnector->fake_enable = false;
3268
3269                         amdgpu_dm_update_connector_after_detect(aconnector);
3270
3271
3272                         drm_modeset_lock_all(dev);
3273                         dm_restore_drm_connector_state(dev, connector);
3274                         drm_modeset_unlock_all(dev);
3275
3276                         drm_kms_helper_connector_hotplug_event(connector);
3277                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3278
3279                         if (aconnector->fake_enable)
3280                                 aconnector->fake_enable = false;
3281
3282                         amdgpu_dm_update_connector_after_detect(aconnector);
3283
3284
3285                         drm_modeset_lock_all(dev);
3286                         dm_restore_drm_connector_state(dev, connector);
3287                         drm_modeset_unlock_all(dev);
3288
3289                         drm_kms_helper_connector_hotplug_event(connector);
3290                 }
3291         }
3292 #ifdef CONFIG_DRM_AMD_DC_HDCP
3293         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3294                 if (adev->dm.hdcp_workqueue)
3295                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3296         }
3297 #endif
3298
3299         if (dc_link->type != dc_connection_mst_branch)
3300                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3301
3302         mutex_unlock(&aconnector->hpd_lock);
3303 }
3304
3305 static void register_hpd_handlers(struct amdgpu_device *adev)
3306 {
3307         struct drm_device *dev = adev_to_drm(adev);
3308         struct drm_connector *connector;
3309         struct amdgpu_dm_connector *aconnector;
3310         const struct dc_link *dc_link;
3311         struct dc_interrupt_params int_params = {0};
3312
3313         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3314         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3315
3316         list_for_each_entry(connector,
3317                         &dev->mode_config.connector_list, head) {
3318
3319                 aconnector = to_amdgpu_dm_connector(connector);
3320                 dc_link = aconnector->dc_link;
3321
3322                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3323                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3324                         int_params.irq_source = dc_link->irq_source_hpd;
3325
3326                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3327                                         handle_hpd_irq,
3328                                         (void *) aconnector);
3329                 }
3330
3331                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3332
3333                         /* Also register for DP short pulse (hpd_rx). */
3334                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3335                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3336
3337                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3338                                         handle_hpd_rx_irq,
3339                                         (void *) aconnector);
3340
3341                         if (adev->dm.hpd_rx_offload_wq)
3342                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3343                                         aconnector;
3344                 }
3345         }
3346 }
3347
3348 #if defined(CONFIG_DRM_AMD_DC_SI)
3349 /* Register IRQ sources and initialize IRQ callbacks */
3350 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3351 {
3352         struct dc *dc = adev->dm.dc;
3353         struct common_irq_params *c_irq_params;
3354         struct dc_interrupt_params int_params = {0};
3355         int r;
3356         int i;
3357         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3358
3359         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3360         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3361
3362         /*
3363          * Actions of amdgpu_irq_add_id():
3364          * 1. Register a set() function with base driver.
3365          *    Base driver will call set() function to enable/disable an
3366          *    interrupt in DC hardware.
3367          * 2. Register amdgpu_dm_irq_handler().
3368          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3369          *    coming from DC hardware.
3370          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3371          *    for acknowledging and handling. */
3372
3373         /* Use VBLANK interrupt */
3374         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3375                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3376                 if (r) {
3377                         DRM_ERROR("Failed to add crtc irq id!\n");
3378                         return r;
3379                 }
3380
3381                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3382                 int_params.irq_source =
3383                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3384
3385                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3386
3387                 c_irq_params->adev = adev;
3388                 c_irq_params->irq_src = int_params.irq_source;
3389
3390                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3391                                 dm_crtc_high_irq, c_irq_params);
3392         }
3393
3394         /* Use GRPH_PFLIP interrupt */
3395         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3396                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3397                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3398                 if (r) {
3399                         DRM_ERROR("Failed to add page flip irq id!\n");
3400                         return r;
3401                 }
3402
3403                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3404                 int_params.irq_source =
3405                         dc_interrupt_to_irq_source(dc, i, 0);
3406
3407                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3408
3409                 c_irq_params->adev = adev;
3410                 c_irq_params->irq_src = int_params.irq_source;
3411
3412                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3413                                 dm_pflip_high_irq, c_irq_params);
3414
3415         }
3416
3417         /* HPD */
3418         r = amdgpu_irq_add_id(adev, client_id,
3419                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3420         if (r) {
3421                 DRM_ERROR("Failed to add hpd irq id!\n");
3422                 return r;
3423         }
3424
3425         register_hpd_handlers(adev);
3426
3427         return 0;
3428 }
3429 #endif
3430
3431 /* Register IRQ sources and initialize IRQ callbacks */
3432 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3433 {
3434         struct dc *dc = adev->dm.dc;
3435         struct common_irq_params *c_irq_params;
3436         struct dc_interrupt_params int_params = {0};
3437         int r;
3438         int i;
3439         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3440
3441         if (adev->family >= AMDGPU_FAMILY_AI)
3442                 client_id = SOC15_IH_CLIENTID_DCE;
3443
3444         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3445         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3446
3447         /*
3448          * Actions of amdgpu_irq_add_id():
3449          * 1. Register a set() function with base driver.
3450          *    Base driver will call set() function to enable/disable an
3451          *    interrupt in DC hardware.
3452          * 2. Register amdgpu_dm_irq_handler().
3453          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3454          *    coming from DC hardware.
3455          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3456          *    for acknowledging and handling. */
3457
3458         /* Use VBLANK interrupt */
3459         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3460                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3461                 if (r) {
3462                         DRM_ERROR("Failed to add crtc irq id!\n");
3463                         return r;
3464                 }
3465
3466                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3467                 int_params.irq_source =
3468                         dc_interrupt_to_irq_source(dc, i, 0);
3469
3470                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3471
3472                 c_irq_params->adev = adev;
3473                 c_irq_params->irq_src = int_params.irq_source;
3474
3475                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3476                                 dm_crtc_high_irq, c_irq_params);
3477         }
3478
3479         /* Use VUPDATE interrupt */
3480         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3481                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3482                 if (r) {
3483                         DRM_ERROR("Failed to add vupdate irq id!\n");
3484                         return r;
3485                 }
3486
3487                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3488                 int_params.irq_source =
3489                         dc_interrupt_to_irq_source(dc, i, 0);
3490
3491                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3492
3493                 c_irq_params->adev = adev;
3494                 c_irq_params->irq_src = int_params.irq_source;
3495
3496                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3497                                 dm_vupdate_high_irq, c_irq_params);
3498         }
3499
3500         /* Use GRPH_PFLIP interrupt */
3501         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3502                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3503                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3504                 if (r) {
3505                         DRM_ERROR("Failed to add page flip irq id!\n");
3506                         return r;
3507                 }
3508
3509                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3510                 int_params.irq_source =
3511                         dc_interrupt_to_irq_source(dc, i, 0);
3512
3513                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3514
3515                 c_irq_params->adev = adev;
3516                 c_irq_params->irq_src = int_params.irq_source;
3517
3518                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3519                                 dm_pflip_high_irq, c_irq_params);
3520
3521         }
3522
3523         /* HPD */
3524         r = amdgpu_irq_add_id(adev, client_id,
3525                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3526         if (r) {
3527                 DRM_ERROR("Failed to add hpd irq id!\n");
3528                 return r;
3529         }
3530
3531         register_hpd_handlers(adev);
3532
3533         return 0;
3534 }
3535
3536 /* Register IRQ sources and initialize IRQ callbacks */
3537 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3538 {
3539         struct dc *dc = adev->dm.dc;
3540         struct common_irq_params *c_irq_params;
3541         struct dc_interrupt_params int_params = {0};
3542         int r;
3543         int i;
3544 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3545         static const unsigned int vrtl_int_srcid[] = {
3546                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3547                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3548                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3549                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3550                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3551                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3552         };
3553 #endif
3554
3555         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3556         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3557
3558         /*
3559          * Actions of amdgpu_irq_add_id():
3560          * 1. Register a set() function with base driver.
3561          *    Base driver will call set() function to enable/disable an
3562          *    interrupt in DC hardware.
3563          * 2. Register amdgpu_dm_irq_handler().
3564          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3565          *    coming from DC hardware.
3566          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3567          *    for acknowledging and handling.
3568          */
3569
3570         /* Use VSTARTUP interrupt */
3571         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3572                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3573                         i++) {
3574                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3575
3576                 if (r) {
3577                         DRM_ERROR("Failed to add crtc irq id!\n");
3578                         return r;
3579                 }
3580
3581                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3582                 int_params.irq_source =
3583                         dc_interrupt_to_irq_source(dc, i, 0);
3584
3585                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3586
3587                 c_irq_params->adev = adev;
3588                 c_irq_params->irq_src = int_params.irq_source;
3589
3590                 amdgpu_dm_irq_register_interrupt(
3591                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3592         }
3593
3594         /* Use otg vertical line interrupt */
3595 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3596         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3597                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3598                                 vrtl_int_srcid[i], &adev->vline0_irq);
3599
3600                 if (r) {
3601                         DRM_ERROR("Failed to add vline0 irq id!\n");
3602                         return r;
3603                 }
3604
3605                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3606                 int_params.irq_source =
3607                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3608
3609                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3610                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3611                         break;
3612                 }
3613
3614                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3615                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3616
3617                 c_irq_params->adev = adev;
3618                 c_irq_params->irq_src = int_params.irq_source;
3619
3620                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3621                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3622         }
3623 #endif
3624
3625         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3626          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3627          * to trigger at end of each vblank, regardless of state of the lock,
3628          * matching DCE behaviour.
3629          */
3630         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3631              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3632              i++) {
3633                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3634
3635                 if (r) {
3636                         DRM_ERROR("Failed to add vupdate irq id!\n");
3637                         return r;
3638                 }
3639
3640                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3641                 int_params.irq_source =
3642                         dc_interrupt_to_irq_source(dc, i, 0);
3643
3644                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3645
3646                 c_irq_params->adev = adev;
3647                 c_irq_params->irq_src = int_params.irq_source;
3648
3649                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3650                                 dm_vupdate_high_irq, c_irq_params);
3651         }
3652
3653         /* Use GRPH_PFLIP interrupt */
3654         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3655                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3656                         i++) {
3657                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3658                 if (r) {
3659                         DRM_ERROR("Failed to add page flip irq id!\n");
3660                         return r;
3661                 }
3662
3663                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3664                 int_params.irq_source =
3665                         dc_interrupt_to_irq_source(dc, i, 0);
3666
3667                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3668
3669                 c_irq_params->adev = adev;
3670                 c_irq_params->irq_src = int_params.irq_source;
3671
3672                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3673                                 dm_pflip_high_irq, c_irq_params);
3674
3675         }
3676
3677         /* HPD */
3678         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3679                         &adev->hpd_irq);
3680         if (r) {
3681                 DRM_ERROR("Failed to add hpd irq id!\n");
3682                 return r;
3683         }
3684
3685         register_hpd_handlers(adev);
3686
3687         return 0;
3688 }
3689 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3690 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3691 {
3692         struct dc *dc = adev->dm.dc;
3693         struct common_irq_params *c_irq_params;
3694         struct dc_interrupt_params int_params = {0};
3695         int r, i;
3696
3697         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3698         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3699
3700         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3701                         &adev->dmub_outbox_irq);
3702         if (r) {
3703                 DRM_ERROR("Failed to add outbox irq id!\n");
3704                 return r;
3705         }
3706
3707         if (dc->ctx->dmub_srv) {
3708                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3709                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3710                 int_params.irq_source =
3711                         dc_interrupt_to_irq_source(dc, i, 0);
3712
3713                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3714
3715                 c_irq_params->adev = adev;
3716                 c_irq_params->irq_src = int_params.irq_source;
3717
3718                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3719                                 dm_dmub_outbox1_low_irq, c_irq_params);
3720         }
3721
3722         return 0;
3723 }
3724
3725 /*
3726  * Acquires the lock for the atomic state object and returns
3727  * the new atomic state.
3728  *
3729  * This should only be called during atomic check.
3730  */
3731 int dm_atomic_get_state(struct drm_atomic_state *state,
3732                         struct dm_atomic_state **dm_state)
3733 {
3734         struct drm_device *dev = state->dev;
3735         struct amdgpu_device *adev = drm_to_adev(dev);
3736         struct amdgpu_display_manager *dm = &adev->dm;
3737         struct drm_private_state *priv_state;
3738
3739         if (*dm_state)
3740                 return 0;
3741
3742         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3743         if (IS_ERR(priv_state))
3744                 return PTR_ERR(priv_state);
3745
3746         *dm_state = to_dm_atomic_state(priv_state);
3747
3748         return 0;
3749 }
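
/*
 * Typical usage sketch (illustration only) from an atomic_check path:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context is now safe to inspect and modify.
 */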
3750
3751 static struct dm_atomic_state *
3752 dm_atomic_get_new_state(struct drm_atomic_state *state)
3753 {
3754         struct drm_device *dev = state->dev;
3755         struct amdgpu_device *adev = drm_to_adev(dev);
3756         struct amdgpu_display_manager *dm = &adev->dm;
3757         struct drm_private_obj *obj;
3758         struct drm_private_state *new_obj_state;
3759         int i;
3760
3761         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3762                 if (obj->funcs == dm->atomic_obj.funcs)
3763                         return to_dm_atomic_state(new_obj_state);
3764         }
3765
3766         return NULL;
3767 }
3768
3769 static struct drm_private_state *
3770 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3771 {
3772         struct dm_atomic_state *old_state, *new_state;
3773
3774         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3775         if (!new_state)
3776                 return NULL;
3777
3778         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3779
3780         old_state = to_dm_atomic_state(obj->state);
3781
3782         if (old_state && old_state->context)
3783                 new_state->context = dc_copy_state(old_state->context);
3784
3785         if (!new_state->context) {
3786                 kfree(new_state);
3787                 return NULL;
3788         }
3789
3790         return &new_state->base;
3791 }
3792
3793 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3794                                     struct drm_private_state *state)
3795 {
3796         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3797
3798         if (dm_state && dm_state->context)
3799                 dc_release_state(dm_state->context);
3800
3801         kfree(dm_state);
3802 }
3803
3804 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3805         .atomic_duplicate_state = dm_atomic_duplicate_state,
3806         .atomic_destroy_state = dm_atomic_destroy_state,
3807 };
3808
3809 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3810 {
3811         struct dm_atomic_state *state;
3812         int r;
3813
3814         adev->mode_info.mode_config_initialized = true;
3815
3816         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3817         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3818
3819         adev_to_drm(adev)->mode_config.max_width = 16384;
3820         adev_to_drm(adev)->mode_config.max_height = 16384;
3821
3822         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3823         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3824         /* indicates support for immediate flip */
3825         adev_to_drm(adev)->mode_config.async_page_flip = true;
3826
3827         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3828
3829         state = kzalloc(sizeof(*state), GFP_KERNEL);
3830         if (!state)
3831                 return -ENOMEM;
3832
3833         state->context = dc_create_state(adev->dm.dc);
3834         if (!state->context) {
3835                 kfree(state);
3836                 return -ENOMEM;
3837         }
3838
3839         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3840
3841         drm_atomic_private_obj_init(adev_to_drm(adev),
3842                                     &adev->dm.atomic_obj,
3843                                     &state->base,
3844                                     &dm_atomic_state_funcs);
3845
3846         r = amdgpu_display_modeset_create_props(adev);
3847         if (r) {
3848                 dc_release_state(state->context);
3849                 kfree(state);
3850                 return r;
3851         }
3852
3853         r = amdgpu_dm_audio_init(adev);
3854         if (r) {
3855                 dc_release_state(state->context);
3856                 kfree(state);
3857                 return r;
3858         }
3859
3860         return 0;
3861 }
3862
3863 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3864 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3865 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3866
3867 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3868                                             int bl_idx)
3869 {
3870 #if defined(CONFIG_ACPI)
3871         struct amdgpu_dm_backlight_caps caps;
3872
3873         memset(&caps, 0, sizeof(caps));
3874
3875         if (dm->backlight_caps[bl_idx].caps_valid)
3876                 return;
3877
3878         amdgpu_acpi_get_backlight_caps(&caps);
3879         if (caps.caps_valid) {
3880                 dm->backlight_caps[bl_idx].caps_valid = true;
3881                 if (caps.aux_support)
3882                         return;
3883                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3884                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3885         } else {
3886                 dm->backlight_caps[bl_idx].min_input_signal =
3887                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3888                 dm->backlight_caps[bl_idx].max_input_signal =
3889                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3890         }
3891 #else
3892         if (dm->backlight_caps[bl_idx].aux_support)
3893                 return;
3894
3895         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3896         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3897 #endif
3898 }
3899
3900 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3901                                 unsigned *min, unsigned *max)
3902 {
3903         if (!caps)
3904                 return 0;
3905
3906         if (caps->aux_support) {
3907                 // Firmware limits are in nits, DC API wants millinits.
3908                 *max = 1000 * caps->aux_max_input_signal;
3909                 *min = 1000 * caps->aux_min_input_signal;
3910         } else {
3911                 // Firmware limits are 8-bit, PWM control is 16-bit.
3912                 *max = 0x101 * caps->max_input_signal;
3913                 *min = 0x101 * caps->min_input_signal;
3914         }
3915         return 1;
3916 }
3917
3918 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3919                                         uint32_t brightness)
3920 {
3921         unsigned min, max;
3922
3923         if (!get_brightness_range(caps, &min, &max))
3924                 return brightness;
3925
3926         // Rescale 0..255 to min..max
3927         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3928                                        AMDGPU_MAX_BL_LEVEL);
3929 }
3930
3931 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3932                                       uint32_t brightness)
3933 {
3934         unsigned min, max;
3935
3936         if (!get_brightness_range(caps, &min, &max))
3937                 return brightness;
3938
3939         if (brightness < min)
3940                 return 0;
3941         // Rescale min..max to 0..255
3942         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3943                                  max - min);
3944 }
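
/*
 * Worked example (illustration only), assuming AMDGPU_MAX_BL_LEVEL is 0xff
 * and the default PWM caps above: min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 65535, so
 *
 *	convert_brightness_from_user(caps, 128)
 *		= 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432,
 *
 * and convert_brightness_to_user(caps, 34432) maps back to 128.
 */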
3945
3946 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3947                                          int bl_idx,
3948                                          u32 user_brightness)
3949 {
3950         struct amdgpu_dm_backlight_caps caps;
3951         struct dc_link *link;
3952         u32 brightness;
3953         bool rc;
3954
3955         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3956         caps = dm->backlight_caps[bl_idx];
3957
3958         dm->brightness[bl_idx] = user_brightness;
3959         /* update scratch register */
3960         if (bl_idx == 0)
3961                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3962         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3963         link = (struct dc_link *)dm->backlight_link[bl_idx];
3964
3965         /* Change brightness based on AUX property */
3966         if (caps.aux_support) {
3967                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3968                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3969                 if (!rc)
3970                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3971         } else {
3972                 rc = dc_link_set_backlight_level(link, brightness, 0);
3973                 if (!rc)
3974                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3975         }
3976
3977         if (rc)
3978                 dm->actual_brightness[bl_idx] = user_brightness;
3979 }
3980
3981 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3982 {
3983         struct amdgpu_display_manager *dm = bl_get_data(bd);
3984         int i;
3985
3986         for (i = 0; i < dm->num_of_edps; i++) {
3987                 if (bd == dm->backlight_dev[i])
3988                         break;
3989         }
3990         if (i >= AMDGPU_DM_MAX_NUM_EDP)
3991                 i = 0;
3992         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3993
3994         return 0;
3995 }
3996
3997 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3998                                          int bl_idx)
3999 {
4000         struct amdgpu_dm_backlight_caps caps;
4001         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4002
4003         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4004         caps = dm->backlight_caps[bl_idx];
4005
4006         if (caps.aux_support) {
4007                 u32 avg, peak;
4008                 bool rc;
4009
4010                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4011                 if (!rc)
4012                         return dm->brightness[bl_idx];
4013                 return convert_brightness_to_user(&caps, avg);
4014         } else {
4015                 int ret = dc_link_get_backlight_level(link);
4016
4017                 if (ret == DC_ERROR_UNEXPECTED)
4018                         return dm->brightness[bl_idx];
4019                 return convert_brightness_to_user(&caps, ret);
4020         }
4021 }
4022
4023 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4024 {
4025         struct amdgpu_display_manager *dm = bl_get_data(bd);
4026         int i;
4027
4028         for (i = 0; i < dm->num_of_edps; i++) {
4029                 if (bd == dm->backlight_dev[i])
4030                         break;
4031         }
4032         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4033                 i = 0;
4034         return amdgpu_dm_backlight_get_level(dm, i);
4035 }
4036
4037 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4038         .options = BL_CORE_SUSPENDRESUME,
4039         .get_brightness = amdgpu_dm_backlight_get_brightness,
4040         .update_status  = amdgpu_dm_backlight_update_status,
4041 };
4042
4043 static void
4044 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4045 {
4046         char bl_name[16];
4047         struct backlight_properties props = { 0 };
4048
4049         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4050         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4051
4052         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4053         props.brightness = AMDGPU_MAX_BL_LEVEL;
4054         props.type = BACKLIGHT_RAW;
4055
4056         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4057                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4058
4059         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4060                                                                        adev_to_drm(dm->adev)->dev,
4061                                                                        dm,
4062                                                                        &amdgpu_dm_backlight_ops,
4063                                                                        &props);
4064
4065         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4066                 DRM_ERROR("DM: Backlight registration failed!\n");
4067         else
4068                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4069 }
4070
4071 static int initialize_plane(struct amdgpu_display_manager *dm,
4072                             struct amdgpu_mode_info *mode_info, int plane_id,
4073                             enum drm_plane_type plane_type,
4074                             const struct dc_plane_cap *plane_cap)
4075 {
4076         struct drm_plane *plane;
4077         unsigned long possible_crtcs;
4078         int ret = 0;
4079
4080         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4081         if (!plane) {
4082                 DRM_ERROR("KMS: Failed to allocate plane\n");
4083                 return -ENOMEM;
4084         }
4085         plane->type = plane_type;
4086
4087         /*
4088          * HACK: IGT tests expect that the primary plane for a CRTC
4089          * can only have one possible CRTC. Only expose support for
4090          * any CRTC for planes that will not be used as a primary
4091          * plane for a CRTC, i.e. overlay or underlay planes.
4092          */
4093         possible_crtcs = 1 << plane_id;
4094         if (plane_id >= dm->dc->caps.max_streams)
4095                 possible_crtcs = 0xff;
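        /*
         * Example: with max_streams == 4, primary plane 2 is limited to
         * CRTC 2 (possible_crtcs == 1 << 2 == 0x4), while an overlay
         * plane with plane_id >= 4 gets possible_crtcs == 0xff and may
         * attach to any CRTC.
         */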
4096
4097         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4098
4099         if (ret) {
4100                 DRM_ERROR("KMS: Failed to initialize plane\n");
4101                 kfree(plane);
4102                 return ret;
4103         }
4104
4105         if (mode_info)
4106                 mode_info->planes[plane_id] = plane;
4107
4108         return ret;
4109 }
4110
4111
4112 static void register_backlight_device(struct amdgpu_display_manager *dm,
4113                                       struct dc_link *link)
4114 {
4115         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4116             link->type != dc_connection_none) {
4117                 /*
4118                  * Even if registration fails, we should continue with
4119                  * DM initialization because not having a backlight control
4120                  * is better than a black screen.
4121                  */
4122                 if (!dm->backlight_dev[dm->num_of_edps])
4123                         amdgpu_dm_register_backlight_device(dm);
4124
4125                 if (dm->backlight_dev[dm->num_of_edps]) {
4126                         dm->backlight_link[dm->num_of_edps] = link;
4127                         dm->num_of_edps++;
4128                 }
4129         }
4130 }
4131
4132
4133 /*
4134  * In this architecture, the association
4135  * connector -> encoder -> crtc
4136  * is not really required. The crtc and connector will hold the
4137  * display_index as an abstraction to use with the DAL component.
4138  *
4139  * Returns 0 on success
4140  */
4141 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4142 {
4143         struct amdgpu_display_manager *dm = &adev->dm;
4144         int32_t i;
4145         struct amdgpu_dm_connector *aconnector = NULL;
4146         struct amdgpu_encoder *aencoder = NULL;
4147         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4148         uint32_t link_cnt;
4149         int32_t primary_planes;
4150         enum dc_connection_type new_connection_type = dc_connection_none;
4151         const struct dc_plane_cap *plane;
4152         bool psr_feature_enabled = false;
4153
4154         dm->display_indexes_num = dm->dc->caps.max_streams;
4155         /* Update the actual number of CRTCs in use */
4156         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4157
4158         link_cnt = dm->dc->caps.max_links;
4159         if (amdgpu_dm_mode_config_init(dm->adev)) {
4160                 DRM_ERROR("DM: Failed to initialize mode config\n");
4161                 return -EINVAL;
4162         }
4163
4164         /* There is one primary plane per CRTC */
4165         primary_planes = dm->dc->caps.max_streams;
4166         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4167
4168         /*
4169          * Initialize primary planes, implicit planes for legacy IOCTLs.
4170          * Order is reversed to match iteration order in atomic check.
4171          */
4172         for (i = (primary_planes - 1); i >= 0; i--) {
4173                 plane = &dm->dc->caps.planes[i];
4174
4175                 if (initialize_plane(dm, mode_info, i,
4176                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4177                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4178                         goto fail;
4179                 }
4180         }
4181
4182         /*
4183          * Initialize overlay planes, index starting after primary planes.
4184          * These planes have a higher DRM index than the primary planes since
4185          * they should be considered as having a higher z-order.
4186          * Order is reversed to match iteration order in atomic check.
4187          *
4188          * Only support DCN for now, and only expose one so we don't encourage
4189          * userspace to use up all the pipes.
4190          */
4191         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4192                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4193
4194                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4195                         continue;
4196
4197                 if (!plane->blends_with_above || !plane->blends_with_below)
4198                         continue;
4199
4200                 if (!plane->pixel_format_support.argb8888)
4201                         continue;
4202
4203                 if (initialize_plane(dm, NULL, primary_planes + i,
4204                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4205                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4206                         goto fail;
4207                 }
4208
4209                 /* Only create one overlay plane. */
4210                 break;
4211         }
4212
4213         for (i = 0; i < dm->dc->caps.max_streams; i++)
4214                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4215                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4216                         goto fail;
4217                 }
4218
4219         /* Use Outbox interrupt */
4220         switch (adev->ip_versions[DCE_HWIP][0]) {
4221         case IP_VERSION(3, 0, 0):
4222         case IP_VERSION(3, 1, 2):
4223         case IP_VERSION(3, 1, 3):
4224         case IP_VERSION(3, 1, 5):
4225         case IP_VERSION(3, 1, 6):
4226         case IP_VERSION(3, 2, 0):
4227         case IP_VERSION(3, 2, 1):
4228         case IP_VERSION(2, 1, 0):
4229                 if (register_outbox_irq_handlers(dm->adev)) {
4230                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4231                         goto fail;
4232                 }
4233                 break;
4234         default:
4235                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4236                               adev->ip_versions[DCE_HWIP][0]);
4237         }
4238
4239         /* Determine whether to enable PSR support by default. */
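        /*
         * For the DCN IP versions listed below, PSR defaults to on; on
         * other ASICs it is only enabled when DC_PSR_MASK is set in
         * amdgpu_dc_feature_mask. DC_DISABLE_PSR in amdgpu_dc_debug_mask
         * disables it everywhere.
         */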
4240         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4241                 switch (adev->ip_versions[DCE_HWIP][0]) {
4242                 case IP_VERSION(3, 1, 2):
4243                 case IP_VERSION(3, 1, 3):
4244                 case IP_VERSION(3, 1, 5):
4245                 case IP_VERSION(3, 1, 6):
4246                 case IP_VERSION(3, 2, 0):
4247                 case IP_VERSION(3, 2, 1):
4248                         psr_feature_enabled = true;
4249                         break;
4250                 default:
4251                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4252                         break;
4253                 }
4254         }
4255
4256         /* Loop over all connectors on the board. */
4257         for (i = 0; i < link_cnt; i++) {
4258                 struct dc_link *link = NULL;
4259
4260                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4261                         DRM_ERROR(
4262                                 "KMS: Cannot support more than %d display indexes\n",
4263                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4264                         continue;
4265                 }
4266
4267                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4268                 if (!aconnector)
4269                         goto fail;
4270
4271                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4272                 if (!aencoder)
4273                         goto fail;
4274
4275                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4276                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4277                         goto fail;
4278                 }
4279
4280                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4281                         DRM_ERROR("KMS: Failed to initialize connector\n");
4282                         goto fail;
4283                 }
4284
4285                 link = dc_get_link_at_index(dm->dc, i);
4286
4287                 if (!dc_link_detect_sink(link, &new_connection_type))
4288                         DRM_ERROR("KMS: Failed to detect connector\n");
4289
4290                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4291                         emulated_link_detect(link);
4292                         amdgpu_dm_update_connector_after_detect(aconnector);
4293
4294                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4295                         amdgpu_dm_update_connector_after_detect(aconnector);
4296                         register_backlight_device(dm, link);
4297                         if (dm->num_of_edps)
4298                                 update_connector_ext_caps(aconnector);
4299                         if (psr_feature_enabled)
4300                                 amdgpu_dm_set_psr_caps(link);
4301
4302                         /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4303                          * PSR is also supported.
4304                          */
4305                         if (link->psr_settings.psr_feature_enabled)
4306                                 adev_to_drm(adev)->vblank_disable_immediate = false;
4307                 }
4308
4309
4310         }
4311
4312         /* Software is initialized. Now we can register interrupt handlers. */
4313         switch (adev->asic_type) {
4314 #if defined(CONFIG_DRM_AMD_DC_SI)
4315         case CHIP_TAHITI:
4316         case CHIP_PITCAIRN:
4317         case CHIP_VERDE:
4318         case CHIP_OLAND:
4319                 if (dce60_register_irq_handlers(dm->adev)) {
4320                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4321                         goto fail;
4322                 }
4323                 break;
4324 #endif
4325         case CHIP_BONAIRE:
4326         case CHIP_HAWAII:
4327         case CHIP_KAVERI:
4328         case CHIP_KABINI:
4329         case CHIP_MULLINS:
4330         case CHIP_TONGA:
4331         case CHIP_FIJI:
4332         case CHIP_CARRIZO:
4333         case CHIP_STONEY:
4334         case CHIP_POLARIS11:
4335         case CHIP_POLARIS10:
4336         case CHIP_POLARIS12:
4337         case CHIP_VEGAM:
4338         case CHIP_VEGA10:
4339         case CHIP_VEGA12:
4340         case CHIP_VEGA20:
4341                 if (dce110_register_irq_handlers(dm->adev)) {
4342                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4343                         goto fail;
4344                 }
4345                 break;
4346         default:
4347                 switch (adev->ip_versions[DCE_HWIP][0]) {
4348                 case IP_VERSION(1, 0, 0):
4349                 case IP_VERSION(1, 0, 1):
4350                 case IP_VERSION(2, 0, 2):
4351                 case IP_VERSION(2, 0, 3):
4352                 case IP_VERSION(2, 0, 0):
4353                 case IP_VERSION(2, 1, 0):
4354                 case IP_VERSION(3, 0, 0):
4355                 case IP_VERSION(3, 0, 2):
4356                 case IP_VERSION(3, 0, 3):
4357                 case IP_VERSION(3, 0, 1):
4358                 case IP_VERSION(3, 1, 2):
4359                 case IP_VERSION(3, 1, 3):
4360                 case IP_VERSION(3, 1, 5):
4361                 case IP_VERSION(3, 1, 6):
4362                 case IP_VERSION(3, 2, 0):
4363                 case IP_VERSION(3, 2, 1):
4364                         if (dcn10_register_irq_handlers(dm->adev)) {
4365                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4366                                 goto fail;
4367                         }
4368                         break;
4369                 default:
4370                         DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4371                                         adev->ip_versions[DCE_HWIP][0]);
4372                         goto fail;
4373                 }
4374                 break;
4375         }
4376
4377         return 0;
4378 fail:
4379         kfree(aencoder);
4380         kfree(aconnector);
4381
4382         return -EINVAL;
4383 }
4384
4385 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4386 {
4387         drm_atomic_private_obj_fini(&dm->atomic_obj);
4388         return;
4389 }
4390
4391 /******************************************************************************
4392  * amdgpu_display_funcs functions
4393  *****************************************************************************/
4394
4395 /*
4396  * dm_bandwidth_update - program display watermarks
4397  *
4398  * @adev: amdgpu_device pointer
4399  *
4400  * Calculate and program the display watermarks and line buffer allocation.
4401  */
4402 static void dm_bandwidth_update(struct amdgpu_device *adev)
4403 {
4404         /* TODO: implement later */
4405 }
4406
4407 static const struct amdgpu_display_funcs dm_display_funcs = {
4408         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4409         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4410         .backlight_set_level = NULL, /* never called for DC */
4411         .backlight_get_level = NULL, /* never called for DC */
4412         .hpd_sense = NULL,/* called unconditionally */
4413         .hpd_set_polarity = NULL, /* called unconditionally */
4414         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4415         .page_flip_get_scanoutpos =
4416                 dm_crtc_get_scanoutpos,/* called unconditionally */
4417         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4418         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4419 };
4420
4421 #if defined(CONFIG_DEBUG_KERNEL_DC)
4422
4423 static ssize_t s3_debug_store(struct device *device,
4424                               struct device_attribute *attr,
4425                               const char *buf,
4426                               size_t count)
4427 {
4428         int ret;
4429         int s3_state;
4430         struct drm_device *drm_dev = dev_get_drvdata(device);
4431         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4432
4433         ret = kstrtoint(buf, 0, &s3_state);
4434
4435         if (ret == 0) {
4436                 if (s3_state) {
4437                         dm_resume(adev);
4438                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4439                 } else
4440                         dm_suspend(adev);
4441         }
4442
4443         return ret == 0 ? count : ret;
4444 }
4445
4446 DEVICE_ATTR_WO(s3_debug);
4447
4448 #endif
4449
4450 static int dm_early_init(void *handle)
4451 {
4452         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4453
4454         switch (adev->asic_type) {
4455 #if defined(CONFIG_DRM_AMD_DC_SI)
4456         case CHIP_TAHITI:
4457         case CHIP_PITCAIRN:
4458         case CHIP_VERDE:
4459                 adev->mode_info.num_crtc = 6;
4460                 adev->mode_info.num_hpd = 6;
4461                 adev->mode_info.num_dig = 6;
4462                 break;
4463         case CHIP_OLAND:
4464                 adev->mode_info.num_crtc = 2;
4465                 adev->mode_info.num_hpd = 2;
4466                 adev->mode_info.num_dig = 2;
4467                 break;
4468 #endif
4469         case CHIP_BONAIRE:
4470         case CHIP_HAWAII:
4471                 adev->mode_info.num_crtc = 6;
4472                 adev->mode_info.num_hpd = 6;
4473                 adev->mode_info.num_dig = 6;
4474                 break;
4475         case CHIP_KAVERI:
4476                 adev->mode_info.num_crtc = 4;
4477                 adev->mode_info.num_hpd = 6;
4478                 adev->mode_info.num_dig = 7;
4479                 break;
4480         case CHIP_KABINI:
4481         case CHIP_MULLINS:
4482                 adev->mode_info.num_crtc = 2;
4483                 adev->mode_info.num_hpd = 6;
4484                 adev->mode_info.num_dig = 6;
4485                 break;
4486         case CHIP_FIJI:
4487         case CHIP_TONGA:
4488                 adev->mode_info.num_crtc = 6;
4489                 adev->mode_info.num_hpd = 6;
4490                 adev->mode_info.num_dig = 7;
4491                 break;
4492         case CHIP_CARRIZO:
4493                 adev->mode_info.num_crtc = 3;
4494                 adev->mode_info.num_hpd = 6;
4495                 adev->mode_info.num_dig = 9;
4496                 break;
4497         case CHIP_STONEY:
4498                 adev->mode_info.num_crtc = 2;
4499                 adev->mode_info.num_hpd = 6;
4500                 adev->mode_info.num_dig = 9;
4501                 break;
4502         case CHIP_POLARIS11:
4503         case CHIP_POLARIS12:
4504                 adev->mode_info.num_crtc = 5;
4505                 adev->mode_info.num_hpd = 5;
4506                 adev->mode_info.num_dig = 5;
4507                 break;
4508         case CHIP_POLARIS10:
4509         case CHIP_VEGAM:
4510                 adev->mode_info.num_crtc = 6;
4511                 adev->mode_info.num_hpd = 6;
4512                 adev->mode_info.num_dig = 6;
4513                 break;
4514         case CHIP_VEGA10:
4515         case CHIP_VEGA12:
4516         case CHIP_VEGA20:
4517                 adev->mode_info.num_crtc = 6;
4518                 adev->mode_info.num_hpd = 6;
4519                 adev->mode_info.num_dig = 6;
4520                 break;
4521         default:
4522
4523                 switch (adev->ip_versions[DCE_HWIP][0]) {
4524                 case IP_VERSION(2, 0, 2):
4525                 case IP_VERSION(3, 0, 0):
4526                         adev->mode_info.num_crtc = 6;
4527                         adev->mode_info.num_hpd = 6;
4528                         adev->mode_info.num_dig = 6;
4529                         break;
4530                 case IP_VERSION(2, 0, 0):
4531                 case IP_VERSION(3, 0, 2):
4532                         adev->mode_info.num_crtc = 5;
4533                         adev->mode_info.num_hpd = 5;
4534                         adev->mode_info.num_dig = 5;
4535                         break;
4536                 case IP_VERSION(2, 0, 3):
4537                 case IP_VERSION(3, 0, 3):
4538                         adev->mode_info.num_crtc = 2;
4539                         adev->mode_info.num_hpd = 2;
4540                         adev->mode_info.num_dig = 2;
4541                         break;
4542                 case IP_VERSION(1, 0, 0):
4543                 case IP_VERSION(1, 0, 1):
4544                 case IP_VERSION(3, 0, 1):
4545                 case IP_VERSION(2, 1, 0):
4546                 case IP_VERSION(3, 1, 2):
4547                 case IP_VERSION(3, 1, 3):
4548                 case IP_VERSION(3, 1, 5):
4549                 case IP_VERSION(3, 1, 6):
4550                 case IP_VERSION(3, 2, 0):
4551                 case IP_VERSION(3, 2, 1):
4552                         adev->mode_info.num_crtc = 4;
4553                         adev->mode_info.num_hpd = 4;
4554                         adev->mode_info.num_dig = 4;
4555                         break;
4556                 default:
4557                         DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4558                                         adev->ip_versions[DCE_HWIP][0]);
4559                         return -EINVAL;
4560                 }
4561                 break;
4562         }
4563
4564         amdgpu_dm_set_irq_funcs(adev);
4565
4566         if (adev->mode_info.funcs == NULL)
4567                 adev->mode_info.funcs = &dm_display_funcs;
4568
4569         /*
4570          * Note: Do NOT change adev->audio_endpt_rreg and
4571          * adev->audio_endpt_wreg because they are initialised in
4572          * amdgpu_device_init()
4573          */
4574 #if defined(CONFIG_DEBUG_KERNEL_DC)
4575         device_create_file(
4576                 adev_to_drm(adev)->dev,
4577                 &dev_attr_s3_debug);
4578 #endif
4579
4580         return 0;
4581 }
4582
4583 static bool modeset_required(struct drm_crtc_state *crtc_state,
4584                              struct dc_stream_state *new_stream,
4585                              struct dc_stream_state *old_stream)
4586 {
4587         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4588 }
4589
4590 static bool modereset_required(struct drm_crtc_state *crtc_state)
4591 {
4592         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4593 }
4594
4595 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4596 {
4597         drm_encoder_cleanup(encoder);
4598         kfree(encoder);
4599 }
4600
4601 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4602         .destroy = amdgpu_dm_encoder_destroy,
4603 };
4604
4605
4606 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4607                                          struct drm_framebuffer *fb,
4608                                          int *min_downscale, int *max_upscale)
4609 {
4610         struct amdgpu_device *adev = drm_to_adev(dev);
4611         struct dc *dc = adev->dm.dc;
4612         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4613         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4614
4615         switch (fb->format->format) {
4616         case DRM_FORMAT_P010:
4617         case DRM_FORMAT_NV12:
4618         case DRM_FORMAT_NV21:
4619                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4620                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4621                 break;
4622
4623         case DRM_FORMAT_XRGB16161616F:
4624         case DRM_FORMAT_ARGB16161616F:
4625         case DRM_FORMAT_XBGR16161616F:
4626         case DRM_FORMAT_ABGR16161616F:
4627                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4628                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4629                 break;
4630
4631         default:
4632                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4633                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4634                 break;
4635         }
4636
4637         /*
4638          * A factor of 1 in the plane_cap means scaling is not allowed, i.e.
4639          * use a scaling factor of 1.0, expressed as 1000 units.
4640          */
4641         if (*max_upscale == 1)
4642                 *max_upscale = 1000;
4643
4644         if (*min_downscale == 1)
4645                 *min_downscale = 1000;
4646 }
4647
4648
4649 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4650                                 const struct drm_plane_state *state,
4651                                 struct dc_scaling_info *scaling_info)
4652 {
4653         int scale_w, scale_h, min_downscale, max_upscale;
4654
4655         memset(scaling_info, 0, sizeof(*scaling_info));
4656
4657         /* Source is in fixed-point 16.16; we drop the fractional part for now... */
4658         scaling_info->src_rect.x = state->src_x >> 16;
4659         scaling_info->src_rect.y = state->src_y >> 16;
4660
4661         /*
4662          * For reasons we don't (yet) fully understand, a non-zero
4663          * src_y coordinate into an NV12 buffer can cause a
4664          * system hang on DCN1x.
4665          * To avoid hangs (and maybe to be overly cautious),
4666          * let's reject both non-zero src_x and src_y.
4667          *
4668          * We currently know of only one use-case to reproduce a
4669          * scenario with non-zero src_x and src_y for NV12, which
4670          * is to gesture the YouTube Android app into full screen
4671          * on ChromeOS.
4672          */
4673         if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4674             (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4675             (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4676             (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4677                 return -EINVAL;
4678
4679         scaling_info->src_rect.width = state->src_w >> 16;
4680         if (scaling_info->src_rect.width == 0)
4681                 return -EINVAL;
4682
4683         scaling_info->src_rect.height = state->src_h >> 16;
4684         if (scaling_info->src_rect.height == 0)
4685                 return -EINVAL;
4686
4687         scaling_info->dst_rect.x = state->crtc_x;
4688         scaling_info->dst_rect.y = state->crtc_y;
4689
4690         if (state->crtc_w == 0)
4691                 return -EINVAL;
4692
4693         scaling_info->dst_rect.width = state->crtc_w;
4694
4695         if (state->crtc_h == 0)
4696                 return -EINVAL;
4697
4698         scaling_info->dst_rect.height = state->crtc_h;
4699
4700         /* DRM doesn't specify clipping on destination output. */
4701         scaling_info->clip_rect = scaling_info->dst_rect;
4702
4703         /* Validate scaling per-format with DC plane caps */
4704         if (state->plane && state->plane->dev && state->fb) {
4705                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4706                                              &min_downscale, &max_upscale);
4707         } else {
4708                 min_downscale = 250;
4709                 max_upscale = 16000;
4710         }
4711
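        /*
         * Scale factors are in units of 1/1000: e.g. a 1920-pixel-wide
         * source shown at 960 pixels yields scale_w = 960 * 1000 / 1920
         * = 500 (0.5x), accepted whenever min_downscale <= 500.
         */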
4712         scale_w = scaling_info->dst_rect.width * 1000 /
4713                   scaling_info->src_rect.width;
4714
4715         if (scale_w < min_downscale || scale_w > max_upscale)
4716                 return -EINVAL;
4717
4718         scale_h = scaling_info->dst_rect.height * 1000 /
4719                   scaling_info->src_rect.height;
4720
4721         if (scale_h < min_downscale || scale_h > max_upscale)
4722                 return -EINVAL;
4723
4724         /*
4725          * The "scaling_quality" can be ignored for now; with quality = 0,
4726          * DC assumes reasonable defaults based on the format.
4727          */
4728
4729         return 0;
4730 }
4731
4732 static void
4733 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4734                                  uint64_t tiling_flags)
4735 {
4736         /* Fill GFX8 params */
4737         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4738                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4739
4740                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4741                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4742                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4743                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4744                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4745
4746                 /* XXX fix me for VI */
4747                 tiling_info->gfx8.num_banks = num_banks;
4748                 tiling_info->gfx8.array_mode =
4749                                 DC_ARRAY_2D_TILED_THIN1;
4750                 tiling_info->gfx8.tile_split = tile_split;
4751                 tiling_info->gfx8.bank_width = bankw;
4752                 tiling_info->gfx8.bank_height = bankh;
4753                 tiling_info->gfx8.tile_aspect = mtaspect;
4754                 tiling_info->gfx8.tile_mode =
4755                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4756         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4757                         == DC_ARRAY_1D_TILED_THIN1) {
4758                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4759         }
4760
4761         tiling_info->gfx8.pipe_config =
4762                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4763 }
4764
4765 static void
4766 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4767                                   union dc_tiling_info *tiling_info)
4768 {
4769         tiling_info->gfx9.num_pipes =
4770                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4771         tiling_info->gfx9.num_banks =
4772                 adev->gfx.config.gb_addr_config_fields.num_banks;
4773         tiling_info->gfx9.pipe_interleave =
4774                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4775         tiling_info->gfx9.num_shader_engines =
4776                 adev->gfx.config.gb_addr_config_fields.num_se;
4777         tiling_info->gfx9.max_compressed_frags =
4778                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4779         tiling_info->gfx9.num_rb_per_se =
4780                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4781         tiling_info->gfx9.shaderEnable = 1;
4782         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4783                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4784 }
4785
4786 static int
4787 validate_dcc(struct amdgpu_device *adev,
4788              const enum surface_pixel_format format,
4789              const enum dc_rotation_angle rotation,
4790              const union dc_tiling_info *tiling_info,
4791              const struct dc_plane_dcc_param *dcc,
4792              const struct dc_plane_address *address,
4793              const struct plane_size *plane_size)
4794 {
4795         struct dc *dc = adev->dm.dc;
4796         struct dc_dcc_surface_param input;
4797         struct dc_surface_dcc_cap output;
4798
4799         memset(&input, 0, sizeof(input));
4800         memset(&output, 0, sizeof(output));
4801
4802         if (!dcc->enable)
4803                 return 0;
4804
4805         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4806             !dc->cap_funcs.get_dcc_compression_cap)
4807                 return -EINVAL;
4808
4809         input.format = format;
4810         input.surface_size.width = plane_size->surface_size.width;
4811         input.surface_size.height = plane_size->surface_size.height;
4812         input.swizzle_mode = tiling_info->gfx9.swizzle;
4813
4814         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4815                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4816         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4817                 input.scan = SCAN_DIRECTION_VERTICAL;
4818
4819         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4820                 return -EINVAL;
4821
4822         if (!output.capable)
4823                 return -EINVAL;
4824
4825         if (dcc->independent_64b_blks == 0 &&
4826             output.grph.rgb.independent_64b_blks != 0)
4827                 return -EINVAL;
4828
4829         return 0;
4830 }
4831
4832 static bool
4833 modifier_has_dcc(uint64_t modifier)
4834 {
4835         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4836 }
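/*
 * AMD format modifiers pack their parameters into bit-fields of the
 * 64-bit modifier value; AMD_FMT_MOD_GET()/AMD_FMT_MOD_SET() (from
 * include/uapi/drm/drm_fourcc.h) extract and insert fields such as
 * TILE, TILE_VERSION and DCC.
 */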
4837
4838 static unsigned
4839 modifier_gfx9_swizzle_mode(uint64_t modifier)
4840 {
4841         if (modifier == DRM_FORMAT_MOD_LINEAR)
4842                 return 0;
4843
4844         return AMD_FMT_MOD_GET(TILE, modifier);
4845 }
4846
4847 static const struct drm_format_info *
4848 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4849 {
4850         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4851 }
4852
4853 static void
4854 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4855                                     union dc_tiling_info *tiling_info,
4856                                     uint64_t modifier)
4857 {
4858         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4859         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4860         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4861         unsigned int pipes_log2;
4862
4863         pipes_log2 = min(5u, mod_pipe_xor_bits);
4864
4865         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4866
4867         if (!IS_AMD_FMT_MOD(modifier))
4868                 return;
4869
4870         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4871         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4872
4873         if (adev->family >= AMDGPU_FAMILY_NV) {
4874                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4875         } else {
4876                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4877
4878                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4879         }
4880 }
4881
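/*
 * The two low bits of a GFX9+ swizzle mode select the micro-tile
 * ordering, as assumed by the enum below; dm_plane_format_mod_supported()
 * extracts them with "& 3" to apply per-bpp restrictions on _D swizzles.
 */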
4882 enum dm_micro_swizzle {
4883         MICRO_SWIZZLE_Z = 0,
4884         MICRO_SWIZZLE_S = 1,
4885         MICRO_SWIZZLE_D = 2,
4886         MICRO_SWIZZLE_R = 3
4887 };
4888
4889 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4890                                           uint32_t format,
4891                                           uint64_t modifier)
4892 {
4893         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4894         const struct drm_format_info *info = drm_format_info(format);
4895         int i;
4896
4897         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4898
4899         if (!info)
4900                 return false;
4901
4902         /*
4903          * We always have to allow these modifiers:
4904          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4905          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4906          */
4907         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4908             modifier == DRM_FORMAT_MOD_INVALID) {
4909                 return true;
4910         }
4911
4912         /* Check that the modifier is on the list of the plane's supported modifiers. */
4913         for (i = 0; i < plane->modifier_count; i++) {
4914                 if (modifier == plane->modifiers[i])
4915                         break;
4916         }
4917         if (i == plane->modifier_count)
4918                 return false;
4919
4920         /*
4921          * For D swizzle the canonical modifier depends on the bpp, so check
4922          * it here.
4923          */
4924         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4925             adev->family >= AMDGPU_FAMILY_NV) {
4926                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4927                         return false;
4928         }
4929
4930         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4931             info->cpp[0] < 8)
4932                 return false;
4933
4934         if (modifier_has_dcc(modifier)) {
4935                 /* Per radeonsi comments, 16/64 bpp are more complicated. */
4936                 if (info->cpp[0] != 4)
4937                         return false;
4938                 /* We support multi-planar formats, but not when combined with
4939                  * additional DCC metadata planes. */
4940                 if (info->num_planes > 1)
4941                         return false;
4942         }
4943
4944         return true;
4945 }
4946
4947 static void
4948 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4949 {
4950         if (!*mods)
4951                 return;
4952
4953         if (*cap - *size < 1) {
4954                 uint64_t new_cap = *cap * 2;
4955                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4956
4957                 if (!new_mods) {
4958                         kfree(*mods);
4959                         *mods = NULL;
4960                         return;
4961                 }
4962
4963                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4964                 kfree(*mods);
4965                 *mods = new_mods;
4966                 *cap = new_cap;
4967         }
4968
4969         (*mods)[*size] = mod;
4970         *size += 1;
4971 }
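/*
 * Note: if growing the list fails, add_modifier() frees it and sets
 * *mods to NULL, turning subsequent calls into no-ops; callers such as
 * get_plane_modifiers() detect this by checking *mods afterwards.
 */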
4972
4973 static void
4974 add_gfx9_modifiers(const struct amdgpu_device *adev,
4975                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4976 {
4977         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4978         int pipe_xor_bits = min(8, pipes +
4979                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4980         int bank_xor_bits = min(8 - pipe_xor_bits,
4981                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4982         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4983                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4984
4985
4986         if (adev->family == AMDGPU_FAMILY_RV) {
4987                 /* Raven2 and later */
4988                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4989
4990                 /*
4991                  * No _D DCC swizzles yet because we only allow 32bpp, which
4992                  * doesn't support _D on DCN
4993                  */
4994
4995                 if (has_constant_encode) {
4996                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4997                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4998                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4999                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5000                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5001                                     AMD_FMT_MOD_SET(DCC, 1) |
5002                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5003                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5004                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5005                 }
5006
5007                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5008                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5009                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5010                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5011                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5012                             AMD_FMT_MOD_SET(DCC, 1) |
5013                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5014                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5015                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5016
5017                 if (has_constant_encode) {
5018                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5019                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5020                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5021                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5022                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5023                                     AMD_FMT_MOD_SET(DCC, 1) |
5024                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5025                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5026                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5027
5028                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5029                                     AMD_FMT_MOD_SET(RB, rb) |
5030                                     AMD_FMT_MOD_SET(PIPE, pipes));
5031                 }
5032
5033                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5034                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5035                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5036                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5037                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5038                             AMD_FMT_MOD_SET(DCC, 1) |
5039                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5040                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5041                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5042                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5043                             AMD_FMT_MOD_SET(RB, rb) |
5044                             AMD_FMT_MOD_SET(PIPE, pipes));
5045         }
5046
5047         /*
5048          * Only supported for 64bpp on Raven, will be filtered on format in
5049          * dm_plane_format_mod_supported.
5050          */
5051         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5052                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5053                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5054                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5055                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5056
5057         if (adev->family == AMDGPU_FAMILY_RV) {
5058                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5059                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5060                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5061                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5062                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5063         }
5064
5065         /*
5066          * Only supported for 64bpp on Raven, will be filtered on format in
5067          * dm_plane_format_mod_supported.
5068          */
5069         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5070                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5071                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5072
5073         if (adev->family == AMDGPU_FAMILY_RV) {
5074                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5075                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5076                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5077         }
5078 }
5079
5080 static void
5081 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5082                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5083 {
5084         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5085
5086         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5087                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5088                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5089                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5090                     AMD_FMT_MOD_SET(DCC, 1) |
5091                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5092                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5093                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5094
5095         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5096                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5097                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5098                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5099                     AMD_FMT_MOD_SET(DCC, 1) |
5100                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5101                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5102                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5103                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5104
5105         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5106                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5107                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5108                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5109
5110         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5111                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5112                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5113                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5114
5115
5116         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5117         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5118                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5119                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5120
5121         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5122                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5123                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5124 }
5125
5126 static void
5127 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5128                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5129 {
5130         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5131         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5132
5133         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5134                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5135                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5136                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5137                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5138                     AMD_FMT_MOD_SET(DCC, 1) |
5139                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5140                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5141                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5142                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5143
5144         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5145                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5146                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5147                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5148                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5149                     AMD_FMT_MOD_SET(DCC, 1) |
5150                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5151                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5152                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5153
5154         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5155                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5156                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5157                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5158                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5159                     AMD_FMT_MOD_SET(DCC, 1) |
5160                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5161                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5162                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5163                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5164                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5165
5166         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5167                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5168                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5169                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5170                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5171                     AMD_FMT_MOD_SET(DCC, 1) |
5172                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5173                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5174                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5175                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5176
5177         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5178                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5179                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5180                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5181                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5182
5183         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5184                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5185                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5186                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5187                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5188
5189         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5190         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5191                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5192                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5193
5194         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5195                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5196                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5197 }
5198
5199 static void
5200 add_gfx11_modifiers(struct amdgpu_device *adev,
5201                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5202 {
5203         int num_pipes = 0;
5204         int pipe_xor_bits = 0;
5205         int num_pkrs = 0;
5206         int pkrs = 0;
5207         u32 gb_addr_config;
5208         u8 i = 0;
5209         unsigned swizzle_r_x;
5210         uint64_t modifier_r_x;
5211         uint64_t modifier_dcc_best;
5212         uint64_t modifier_dcc_4k;
5213
5214         /* TODO: GFX11 IP HW init hasn't finished, so we get zero if we read
5215          * from adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes}. */
5216         gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
5217         ASSERT(gb_addr_config != 0);
5218
5219         num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
5220         pkrs = ilog2(num_pkrs);
5221         num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
5222         pipe_xor_bits = ilog2(num_pipes);
5223
5224         for (i = 0; i < 2; i++) {
5225                 /* Insert the best one first. */
5226                 /* R_X swizzle modes are the best for rendering and DCC requires them. */
5227                 if (num_pipes > 16)
5228                         swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
5229                 else
5230                         swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
5231
5232                 modifier_r_x = AMD_FMT_MOD |
5233                                AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5234                                AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5235                                AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
5236                                AMD_FMT_MOD_SET(PACKERS, pkrs);
5237
5238                 /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
5239                 modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5240                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
5241                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5242                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
5243
5244                 /* DCC settings for 4K and greater resolutions. (required by display hw) */
5245                 modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5246                                   AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5247                                   AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5248                                   AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
5249
5250                 add_modifier(mods, size, capacity, modifier_dcc_best);
5251                 add_modifier(mods, size, capacity, modifier_dcc_4k);
5252
5253                 add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5254                 add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5255
5256                 add_modifier(mods, size, capacity, modifier_r_x);
5257         }
5258
5259         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5260              AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5261                          AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
5262 }
5263
5264 static int
5265 get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5266 {
5267         uint64_t size = 0, capacity = 128;
5268         *mods = NULL;
5269
5270         /* We have not hooked up any pre-GFX9 modifiers. */
5271         if (adev->family < AMDGPU_FAMILY_AI)
5272                 return 0;
5273
5274         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5275
5276         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5277                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5278                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5279                 return *mods ? 0 : -ENOMEM;
5280         }
5281
5282         switch (adev->family) {
5283         case AMDGPU_FAMILY_AI:
5284         case AMDGPU_FAMILY_RV:
5285                 add_gfx9_modifiers(adev, mods, &size, &capacity);
5286                 break;
5287         case AMDGPU_FAMILY_NV:
5288         case AMDGPU_FAMILY_VGH:
5289         case AMDGPU_FAMILY_YC:
5290         case AMDGPU_FAMILY_GC_10_3_6:
5291         case AMDGPU_FAMILY_GC_10_3_7:
5292                 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5293                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5294                 else
5295                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5296                 break;
5297         case AMDGPU_FAMILY_GC_11_0_0:
5298                 add_gfx11_modifiers(adev, mods, &size, &capacity);
5299                 break;
5300         }
5301
5302         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5303
5304         /* INVALID marks the end of the list. */
5305         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5306
5307         if (!*mods)
5308                 return -ENOMEM;
5309
5310         return 0;
5311 }
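/*
 * The returned list is terminated by DRM_FORMAT_MOD_INVALID, the
 * sentinel DRM core expects for the format_modifiers array passed to
 * drm_universal_plane_init().
 */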
5312
5313 static int
5314 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5315                                           const struct amdgpu_framebuffer *afb,
5316                                           const enum surface_pixel_format format,
5317                                           const enum dc_rotation_angle rotation,
5318                                           const struct plane_size *plane_size,
5319                                           union dc_tiling_info *tiling_info,
5320                                           struct dc_plane_dcc_param *dcc,
5321                                           struct dc_plane_address *address,
5322                                           const bool force_disable_dcc)
5323 {
5324         const uint64_t modifier = afb->base.modifier;
5325         int ret = 0;
5326
5327         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5328         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5329
5330         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5331                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5332                 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5333                 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5334
5335                 dcc->enable = 1;
5336                 dcc->meta_pitch = afb->base.pitches[1];
5337                 dcc->independent_64b_blks = independent_64b_blks;
5338                 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5339                         if (independent_64b_blks && independent_128b_blks)
5340                                 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5341                         else if (independent_128b_blks)
5342                                 dcc->dcc_ind_blk = hubp_ind_block_128b;
5343                         else if (independent_64b_blks && !independent_128b_blks)
5344                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5345                         else
5346                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5347                 } else {
5348                         if (independent_64b_blks)
5349                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5350                         else
5351                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5352                 }
5353
5354                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5355                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5356         }
5357
5358         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5359         if (ret)
5360                 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5361
5362         return ret;
5363 }
5364
5365 static int
5366 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5367                              const struct amdgpu_framebuffer *afb,
5368                              const enum surface_pixel_format format,
5369                              const enum dc_rotation_angle rotation,
5370                              const uint64_t tiling_flags,
5371                              union dc_tiling_info *tiling_info,
5372                              struct plane_size *plane_size,
5373                              struct dc_plane_dcc_param *dcc,
5374                              struct dc_plane_address *address,
5375                              bool tmz_surface,
5376                              bool force_disable_dcc)
5377 {
5378         const struct drm_framebuffer *fb = &afb->base;
5379         int ret;
5380
5381         memset(tiling_info, 0, sizeof(*tiling_info));
5382         memset(plane_size, 0, sizeof(*plane_size));
5383         memset(dcc, 0, sizeof(*dcc));
5384         memset(address, 0, sizeof(*address));
5385
5386         address->tmz_surface = tmz_surface;
5387
5388         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5389                 uint64_t addr = afb->address + fb->offsets[0];
5390
5391                 plane_size->surface_size.x = 0;
5392                 plane_size->surface_size.y = 0;
5393                 plane_size->surface_size.width = fb->width;
5394                 plane_size->surface_size.height = fb->height;
5395                 plane_size->surface_pitch =
5396                         fb->pitches[0] / fb->format->cpp[0];
5397
5398                 address->type = PLN_ADDR_TYPE_GRAPHICS;
5399                 address->grph.addr.low_part = lower_32_bits(addr);
5400                 address->grph.addr.high_part = upper_32_bits(addr);
5401         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5402                 uint64_t luma_addr = afb->address + fb->offsets[0];
5403                 uint64_t chroma_addr = afb->address + fb->offsets[1];
5404
5405                 plane_size->surface_size.x = 0;
5406                 plane_size->surface_size.y = 0;
5407                 plane_size->surface_size.width = fb->width;
5408                 plane_size->surface_size.height = fb->height;
5409                 plane_size->surface_pitch =
5410                         fb->pitches[0] / fb->format->cpp[0];
5411
5412                 plane_size->chroma_size.x = 0;
5413                 plane_size->chroma_size.y = 0;
5414                 /* TODO: set these based on surface format */
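                /* Assumes 4:2:0 subsampling (NV12/NV21/P010): the chroma plane is half size in each dimension. */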
5415                 plane_size->chroma_size.width = fb->width / 2;
5416                 plane_size->chroma_size.height = fb->height / 2;
5417
5418                 plane_size->chroma_pitch =
5419                         fb->pitches[1] / fb->format->cpp[1];
5420
5421                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5422                 address->video_progressive.luma_addr.low_part =
5423                         lower_32_bits(luma_addr);
5424                 address->video_progressive.luma_addr.high_part =
5425                         upper_32_bits(luma_addr);
5426                 address->video_progressive.chroma_addr.low_part =
5427                         lower_32_bits(chroma_addr);
5428                 address->video_progressive.chroma_addr.high_part =
5429                         upper_32_bits(chroma_addr);
5430         }
5431
5432         if (adev->family >= AMDGPU_FAMILY_AI) {
5433                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5434                                                                 rotation, plane_size,
5435                                                                 tiling_info, dcc,
5436                                                                 address,
5437                                                                 force_disable_dcc);
5438                 if (ret)
5439                         return ret;
5440         } else {
5441                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5442         }
5443
5444         return 0;
5445 }
5446
5447 static void
5448 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5449                                bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5450                                bool *global_alpha, int *global_alpha_value)
5451 {
5452         *per_pixel_alpha = false;
5453         *pre_multiplied_alpha = true;
5454         *global_alpha = false;
5455         *global_alpha_value = 0xff;
5456
5457         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5458                 return;
5459
5460         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5461                 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
5462                 static const uint32_t alpha_formats[] = {
5463                         DRM_FORMAT_ARGB8888,
5464                         DRM_FORMAT_RGBA8888,
5465                         DRM_FORMAT_ABGR8888,
5466                 };
5467                 uint32_t format = plane_state->fb->format->format;
5468                 unsigned int i;
5469
5470                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5471                         if (format == alpha_formats[i]) {
5472                                 *per_pixel_alpha = true;
5473                                 break;
5474                         }
5475                 }
5476
5477                 if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5478                         *pre_multiplied_alpha = false;
5479         }
5480
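        /*
         * The DRM plane alpha property is 16-bit while DC takes an 8-bit
         * global alpha, so scale it down: e.g. 0x8000 >> 8 = 0x80 (~50%).
         */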
5481         if (plane_state->alpha < 0xffff) {
5482                 *global_alpha = true;
5483                 *global_alpha_value = plane_state->alpha >> 8;
5484         }
5485 }
5486
5487 static int
5488 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5489                             const enum surface_pixel_format format,
5490                             enum dc_color_space *color_space)
5491 {
5492         bool full_range;
5493
5494         *color_space = COLOR_SPACE_SRGB;
5495
5496         /* DRM color properties only affect non-RGB formats. */
5497         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5498                 return 0;
5499
5500         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5501
5502         switch (plane_state->color_encoding) {
5503         case DRM_COLOR_YCBCR_BT601:
5504                 if (full_range)
5505                         *color_space = COLOR_SPACE_YCBCR601;
5506                 else
5507                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5508                 break;
5509
5510         case DRM_COLOR_YCBCR_BT709:
5511                 if (full_range)
5512                         *color_space = COLOR_SPACE_YCBCR709;
5513                 else
5514                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5515                 break;
5516
5517         case DRM_COLOR_YCBCR_BT2020:
5518                 if (full_range)
5519                         *color_space = COLOR_SPACE_2020_YCBCR;
5520                 else
5521                         return -EINVAL;
5522                 break;
5523
5524         default:
5525                 return -EINVAL;
5526         }
5527
5528         return 0;
5529 }
5530
5531 static int
5532 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5533                             const struct drm_plane_state *plane_state,
5534                             const uint64_t tiling_flags,
5535                             struct dc_plane_info *plane_info,
5536                             struct dc_plane_address *address,
5537                             bool tmz_surface,
5538                             bool force_disable_dcc)
5539 {
5540         const struct drm_framebuffer *fb = plane_state->fb;
5541         const struct amdgpu_framebuffer *afb =
5542                 to_amdgpu_framebuffer(plane_state->fb);
5543         int ret;
5544
5545         memset(plane_info, 0, sizeof(*plane_info));
5546
5547         switch (fb->format->format) {
5548         case DRM_FORMAT_C8:
5549                 plane_info->format =
5550                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5551                 break;
5552         case DRM_FORMAT_RGB565:
5553                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5554                 break;
5555         case DRM_FORMAT_XRGB8888:
5556         case DRM_FORMAT_ARGB8888:
5557                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5558                 break;
5559         case DRM_FORMAT_XRGB2101010:
5560         case DRM_FORMAT_ARGB2101010:
5561                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5562                 break;
5563         case DRM_FORMAT_XBGR2101010:
5564         case DRM_FORMAT_ABGR2101010:
5565                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5566                 break;
5567         case DRM_FORMAT_XBGR8888:
5568         case DRM_FORMAT_ABGR8888:
5569                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5570                 break;
5571         case DRM_FORMAT_NV21:
5572                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5573                 break;
5574         case DRM_FORMAT_NV12:
5575                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5576                 break;
5577         case DRM_FORMAT_P010:
5578                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5579                 break;
5580         case DRM_FORMAT_XRGB16161616F:
5581         case DRM_FORMAT_ARGB16161616F:
5582                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5583                 break;
5584         case DRM_FORMAT_XBGR16161616F:
5585         case DRM_FORMAT_ABGR16161616F:
5586                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5587                 break;
5588         case DRM_FORMAT_XRGB16161616:
5589         case DRM_FORMAT_ARGB16161616:
5590                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5591                 break;
5592         case DRM_FORMAT_XBGR16161616:
5593         case DRM_FORMAT_ABGR16161616:
5594                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5595                 break;
5596         default:
5597                 DRM_ERROR(
5598                         "Unsupported screen format %p4cc\n",
5599                         &fb->format->format);
5600                 return -EINVAL;
5601         }
5602
5603         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5604         case DRM_MODE_ROTATE_0:
5605                 plane_info->rotation = ROTATION_ANGLE_0;
5606                 break;
5607         case DRM_MODE_ROTATE_90:
5608                 plane_info->rotation = ROTATION_ANGLE_90;
5609                 break;
5610         case DRM_MODE_ROTATE_180:
5611                 plane_info->rotation = ROTATION_ANGLE_180;
5612                 break;
5613         case DRM_MODE_ROTATE_270:
5614                 plane_info->rotation = ROTATION_ANGLE_270;
5615                 break;
5616         default:
5617                 plane_info->rotation = ROTATION_ANGLE_0;
5618                 break;
5619         }
5620
5621         plane_info->visible = true;
5622         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5623
5624         plane_info->layer_index = 0;
5625
5626         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5627                                           &plane_info->color_space);
5628         if (ret)
5629                 return ret;
5630
5631         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5632                                            plane_info->rotation, tiling_flags,
5633                                            &plane_info->tiling_info,
5634                                            &plane_info->plane_size,
5635                                            &plane_info->dcc, address, tmz_surface,
5636                                            force_disable_dcc);
5637         if (ret)
5638                 return ret;
5639
5640         fill_blending_from_plane_state(
5641                 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5642                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5643
5644         return 0;
5645 }
5646
5647 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5648                                     struct dc_plane_state *dc_plane_state,
5649                                     struct drm_plane_state *plane_state,
5650                                     struct drm_crtc_state *crtc_state)
5651 {
5652         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5653         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5654         struct dc_scaling_info scaling_info;
5655         struct dc_plane_info plane_info;
5656         int ret;
5657         bool force_disable_dcc = false;
5658
5659         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5660         if (ret)
5661                 return ret;
5662
5663         dc_plane_state->src_rect = scaling_info.src_rect;
5664         dc_plane_state->dst_rect = scaling_info.dst_rect;
5665         dc_plane_state->clip_rect = scaling_info.clip_rect;
5666         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5667
5668         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5669         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5670                                           afb->tiling_flags,
5671                                           &plane_info,
5672                                           &dc_plane_state->address,
5673                                           afb->tmz_surface,
5674                                           force_disable_dcc);
5675         if (ret)
5676                 return ret;
5677
5678         dc_plane_state->format = plane_info.format;
5679         dc_plane_state->color_space = plane_info.color_space;
5681         dc_plane_state->plane_size = plane_info.plane_size;
5682         dc_plane_state->rotation = plane_info.rotation;
5683         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5684         dc_plane_state->stereo_format = plane_info.stereo_format;
5685         dc_plane_state->tiling_info = plane_info.tiling_info;
5686         dc_plane_state->visible = plane_info.visible;
5687         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5688         dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5689         dc_plane_state->global_alpha = plane_info.global_alpha;
5690         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5691         dc_plane_state->dcc = plane_info.dcc;
5692         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
5693         dc_plane_state->flip_int_enabled = true;
5694
5695         /*
5696          * Always set input transfer function, since plane state is refreshed
5697          * every time.
5698          */
5699         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5700         if (ret)
5701                 return ret;
5702
5703         return 0;
5704 }
5705
5706 /**
5707  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
5708  *
5709  * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
5710  *         remote fb
5711  * @old_plane_state: Old state of @plane
5712  * @new_plane_state: New state of @plane
5713  * @crtc_state: New state of CRTC connected to the @plane
5714  * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
5715  *
5716  * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
5717  * (referred to as "damage clips" in DRM nomenclature) that require updating on
5718  * the eDP remote buffer. It is amdgpu_dm's responsibility to specify
5719  * the dirty regions.
5720  *
5721  * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
5722  * plane with regions that require flushing to the eDP remote buffer. In
5723  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
5724  * implicitly provide damage clips without any client support via the plane
5725  * bounds.
5726  *
5727  * Today, amdgpu_dm only supports the MPO and cursor use cases.
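 *
 * Example: an MPO overlay plane moved from (0,0) to (64,64) with a 256x256
 * bounding box produces two dirty rects below - the new bounds and the old
 * bounds - so only those regions are flushed instead of a full frame.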
5728  *
5729  * TODO: Also enable for FB_DAMAGE_CLIPS
5730  */
5731 static void fill_dc_dirty_rects(struct drm_plane *plane,
5732                                 struct drm_plane_state *old_plane_state,
5733                                 struct drm_plane_state *new_plane_state,
5734                                 struct drm_crtc_state *crtc_state,
5735                                 struct dc_flip_addrs *flip_addrs)
5736 {
5737         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5738         struct rect *dirty_rects = flip_addrs->dirty_rects;
5739         uint32_t num_clips;
5740         bool bb_changed;
5741         bool fb_changed;
5742         uint32_t i = 0;
5743
5744         flip_addrs->dirty_rect_count = 0;
5745
5746         /*
5747          * Cursor plane has its own dirty rect update interface. See
5748          * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
5749          */
5750         if (plane->type == DRM_PLANE_TYPE_CURSOR)
5751                 return;
5752
5753         /*
5754          * Today, we only consider the MPO use case for PSR SU. If MPO is not
5755          * requested and there is a plane update, do a full-frame update (FFU).
5756          */
5757         if (!dm_crtc_state->mpo_requested) {
5758                 dirty_rects[0].x = 0;
5759                 dirty_rects[0].y = 0;
5760                 dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
5761                 dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
5762                 flip_addrs->dirty_rect_count = 1;
5763                 DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
5764                                  new_plane_state->plane->base.id,
5765                                  dm_crtc_state->base.mode.crtc_hdisplay,
5766                                  dm_crtc_state->base.mode.crtc_vdisplay);
5767                 return;
5768         }
5769
5770         /*
5771          * MPO is requested. Add entire plane bounding box to dirty rects if
5772          * flipped to or damaged.
5773          *
5774          * If plane is moved or resized, also add old bounding box to dirty
5775          * rects.
5776          */
5777         num_clips = drm_plane_get_damage_clips_count(new_plane_state);
5778         fb_changed = old_plane_state->fb->base.id !=
5779                      new_plane_state->fb->base.id;
5780         bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
5781                       old_plane_state->crtc_y != new_plane_state->crtc_y ||
5782                       old_plane_state->crtc_w != new_plane_state->crtc_w ||
5783                       old_plane_state->crtc_h != new_plane_state->crtc_h);
5784
5785         DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
5786                          new_plane_state->plane->base.id,
5787                          bb_changed, fb_changed, num_clips);
5788
5789         if (num_clips || fb_changed || bb_changed) {
5790                 dirty_rects[i].x = new_plane_state->crtc_x;
5791                 dirty_rects[i].y = new_plane_state->crtc_y;
5792                 dirty_rects[i].width = new_plane_state->crtc_w;
5793                 dirty_rects[i].height = new_plane_state->crtc_h;
5794                 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5795                                  new_plane_state->plane->base.id,
5796                                  dirty_rects[i].x, dirty_rects[i].y,
5797                                  dirty_rects[i].width, dirty_rects[i].height);
5798                 i += 1;
5799         }
5800
5801         /* Add old plane bounding-box if plane is moved or resized */
5802         if (bb_changed) {
5803                 dirty_rects[i].x = old_plane_state->crtc_x;
5804                 dirty_rects[i].y = old_plane_state->crtc_y;
5805                 dirty_rects[i].width = old_plane_state->crtc_w;
5806                 dirty_rects[i].height = old_plane_state->crtc_h;
5807                 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5808                                 old_plane_state->plane->base.id,
5809                                 dirty_rects[i].x, dirty_rects[i].y,
5810                                 dirty_rects[i].width, dirty_rects[i].height);
5811                 i += 1;
5812         }
5813
5814         flip_addrs->dirty_rect_count = i;
5815 }
5816
5817 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5818                                            const struct dm_connector_state *dm_state,
5819                                            struct dc_stream_state *stream)
5820 {
5821         enum amdgpu_rmx_type rmx_type;
5822
5823         struct rect src = { 0 }; /* viewport in composition space */
5824         struct rect dst = { 0 }; /* stream addressable area */
5825
5826         /* no mode. nothing to be done */
5827         if (!mode)
5828                 return;
5829
5830         /* Full screen scaling by default */
5831         src.width = mode->hdisplay;
5832         src.height = mode->vdisplay;
5833         dst.width = stream->timing.h_addressable;
5834         dst.height = stream->timing.v_addressable;
5835
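        /*
         * Worked example for the aspect-preserving branch below: a 1920x1080
         * source on a 1280x1024 stream scales to 1280x720 (width-limited)
         * and is then centered at y = (1024 - 720) / 2 = 152.
         */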
5836         if (dm_state) {
5837                 rmx_type = dm_state->scaling;
5838                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5839                         if (src.width * dst.height <
5840                                         src.height * dst.width) {
5841                                 /* height needs less upscaling/more downscaling */
5842                                 dst.width = src.width *
5843                                                 dst.height / src.height;
5844                         } else {
5845                                 /* width needs less upscaling/more downscaling */
5846                                 dst.height = src.height *
5847                                                 dst.width / src.width;
5848                         }
5849                 } else if (rmx_type == RMX_CENTER) {
5850                         dst = src;
5851                 }
5852
5853                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5854                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5855
5856                 if (dm_state->underscan_enable) {
5857                         dst.x += dm_state->underscan_hborder / 2;
5858                         dst.y += dm_state->underscan_vborder / 2;
5859                         dst.width -= dm_state->underscan_hborder;
5860                         dst.height -= dm_state->underscan_vborder;
5861                 }
5862         }
5863
5864         stream->src = src;
5865         stream->dst = dst;
5866
5867         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5868                       dst.x, dst.y, dst.width, dst.height);
5870 }
5871
5872 static enum dc_color_depth
5873 convert_color_depth_from_display_info(const struct drm_connector *connector,
5874                                       bool is_y420, int requested_bpc)
5875 {
5876         uint8_t bpc;
5877
5878         if (is_y420) {
5879                 bpc = 8;
5880
5881                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5882                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5883                         bpc = 16;
5884                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5885                         bpc = 12;
5886                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5887                         bpc = 10;
5888         } else {
5889                 bpc = (uint8_t)connector->display_info.bpc;
5890                 /* Assume 8 bpc by default if no bpc is specified. */
5891                 bpc = bpc ? bpc : 8;
5892         }
5893
5894         if (requested_bpc > 0) {
5895                 /*
5896                  * Cap display bpc based on the user requested value.
5897                  *
5898                  * The value for state->max_bpc may not be correctly updated
5899                  * depending on when the connector gets added to the state
5900                  * or if this was called outside of atomic check, so it
5901                  * can't be used directly.
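                 *
                 * e.g. a requested maximum of 11 bpc clamps a 12 bpc panel to
                 * 11, which the rounding below turns into 10 bpc.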
5902                  */
5903                 bpc = min_t(u8, bpc, requested_bpc);
5904
5905                 /* Round down to the nearest even number. */
5906                 bpc = bpc - (bpc & 1);
5907         }
5908
5909         switch (bpc) {
5910         case 0:
5911                 /*
5912                  * Temporary workaround: DRM doesn't parse color depth for
5913                  * EDID revisions before 1.4.
5914                  * TODO: Fix edid parsing
5915                  */
5916                 return COLOR_DEPTH_888;
5917         case 6:
5918                 return COLOR_DEPTH_666;
5919         case 8:
5920                 return COLOR_DEPTH_888;
5921         case 10:
5922                 return COLOR_DEPTH_101010;
5923         case 12:
5924                 return COLOR_DEPTH_121212;
5925         case 14:
5926                 return COLOR_DEPTH_141414;
5927         case 16:
5928                 return COLOR_DEPTH_161616;
5929         default:
5930                 return COLOR_DEPTH_UNDEFINED;
5931         }
5932 }
5933
5934 static enum dc_aspect_ratio
5935 get_aspect_ratio(const struct drm_display_mode *mode_in)
5936 {
5937         /* 1-1 mapping, since both enums follow the HDMI spec. */
5938         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5939 }
5940
5941 static enum dc_color_space
5942 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5943 {
5944         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5945
5946         switch (dc_crtc_timing->pixel_encoding) {
5947         case PIXEL_ENCODING_YCBCR422:
5948         case PIXEL_ENCODING_YCBCR444:
5949         case PIXEL_ENCODING_YCBCR420:
5950         {
5951                 /*
5952                  * 27.03 MHz (27030 kHz) is the separation point between
5953                  * HDTV and SDTV per the HDMI spec: use YCbCr709 above it
5954                  * and YCbCr601 below it.
5955                  */
5956                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5957                         if (dc_crtc_timing->flags.Y_ONLY)
5958                                 color_space =
5959                                         COLOR_SPACE_YCBCR709_LIMITED;
5960                         else
5961                                 color_space = COLOR_SPACE_YCBCR709;
5962                 } else {
5963                         if (dc_crtc_timing->flags.Y_ONLY)
5964                                 color_space =
5965                                         COLOR_SPACE_YCBCR601_LIMITED;
5966                         else
5967                                 color_space = COLOR_SPACE_YCBCR601;
5968                 }
5969
5970         }
5971         break;
5972         case PIXEL_ENCODING_RGB:
5973                 color_space = COLOR_SPACE_SRGB;
5974                 break;
5975
5976         default:
5977                 WARN_ON(1);
5978                 break;
5979         }
5980
5981         return color_space;
5982 }
5983
5984 static bool adjust_colour_depth_from_display_info(
5985         struct dc_crtc_timing *timing_out,
5986         const struct drm_display_info *info)
5987 {
5988         enum dc_color_depth depth = timing_out->display_color_depth;
5989         int normalized_clk;
5990         do {
5991                 normalized_clk = timing_out->pix_clk_100hz / 10;
5992                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5993                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5994                         normalized_clk /= 2;
5995                 /* Adjust the pixel clock per the HDMI spec, based on colour depth */
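                /*
                 * e.g. 297000 kHz at 12 bpc scales to 297000 * 36 / 24 =
                 * 445500 kHz, which must then fit within max_tmds_clock.
                 */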
5996                 switch (depth) {
5997                 case COLOR_DEPTH_888:
5998                         break;
5999                 case COLOR_DEPTH_101010:
6000                         normalized_clk = (normalized_clk * 30) / 24;
6001                         break;
6002                 case COLOR_DEPTH_121212:
6003                         normalized_clk = (normalized_clk * 36) / 24;
6004                         break;
6005                 case COLOR_DEPTH_161616:
6006                         normalized_clk = (normalized_clk * 48) / 24;
6007                         break;
6008                 default:
6009                         /* The above depths are the only ones valid for HDMI. */
6010                         return false;
6011                 }
6012                 if (normalized_clk <= info->max_tmds_clock) {
6013                         timing_out->display_color_depth = depth;
6014                         return true;
6015                 }
6016         } while (--depth > COLOR_DEPTH_666);
6017         return false;
6018 }
6019
6020 static void fill_stream_properties_from_drm_display_mode(
6021         struct dc_stream_state *stream,
6022         const struct drm_display_mode *mode_in,
6023         const struct drm_connector *connector,
6024         const struct drm_connector_state *connector_state,
6025         const struct dc_stream_state *old_stream,
6026         int requested_bpc)
6027 {
6028         struct dc_crtc_timing *timing_out = &stream->timing;
6029         const struct drm_display_info *info = &connector->display_info;
6030         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6031         struct hdmi_vendor_infoframe hv_frame;
6032         struct hdmi_avi_infoframe avi_frame;
6033
6034         memset(&hv_frame, 0, sizeof(hv_frame));
6035         memset(&avi_frame, 0, sizeof(avi_frame));
6036
6037         timing_out->h_border_left = 0;
6038         timing_out->h_border_right = 0;
6039         timing_out->v_border_top = 0;
6040         timing_out->v_border_bottom = 0;
6041         /* TODO: un-hardcode */
6042         if (drm_mode_is_420_only(info, mode_in)
6043                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6044                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6045         else if (drm_mode_is_420_also(info, mode_in)
6046                         && aconnector->force_yuv420_output)
6047                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6048         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
6049                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6050                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
6051         else
6052                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
6053
6054         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
6055         timing_out->display_color_depth = convert_color_depth_from_display_info(
6056                 connector,
6057                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
6058                 requested_bpc);
6059         timing_out->scan_type = SCANNING_TYPE_NODATA;
6060         timing_out->hdmi_vic = 0;
6061
6062         if (old_stream) {
6063                 timing_out->vic = old_stream->timing.vic;
6064                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
6065                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
6066         } else {
6067                 timing_out->vic = drm_match_cea_mode(mode_in);
6068                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
6069                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
6070                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
6071                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
6072         }
6073
6074         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6075                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
6076                 timing_out->vic = avi_frame.video_code;
6077                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
6078                 timing_out->hdmi_vic = hv_frame.vic;
6079         }
6080
6081         if (is_freesync_video_mode(mode_in, aconnector)) {
6082                 timing_out->h_addressable = mode_in->hdisplay;
6083                 timing_out->h_total = mode_in->htotal;
6084                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
6085                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
6086                 timing_out->v_total = mode_in->vtotal;
6087                 timing_out->v_addressable = mode_in->vdisplay;
6088                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
6089                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
6090                 timing_out->pix_clk_100hz = mode_in->clock * 10;
6091         } else {
6092                 timing_out->h_addressable = mode_in->crtc_hdisplay;
6093                 timing_out->h_total = mode_in->crtc_htotal;
6094                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
6095                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
6096                 timing_out->v_total = mode_in->crtc_vtotal;
6097                 timing_out->v_addressable = mode_in->crtc_vdisplay;
6098                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
6099                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
6100                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
6101         }
6102
6103         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
6104
6105         stream->output_color_space = get_output_color_space(timing_out);
6106
6107         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
6108         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
6109         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6110                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
6111                     drm_mode_is_420_also(info, mode_in) &&
6112                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
6113                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6114                         adjust_colour_depth_from_display_info(timing_out, info);
6115                 }
6116         }
6117 }
6118
6119 static void fill_audio_info(struct audio_info *audio_info,
6120                             const struct drm_connector *drm_connector,
6121                             const struct dc_sink *dc_sink)
6122 {
6123         int i = 0;
6124         int cea_revision = 0;
6125         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
6126
6127         audio_info->manufacture_id = edid_caps->manufacturer_id;
6128         audio_info->product_id = edid_caps->product_id;
6129
6130         cea_revision = drm_connector->display_info.cea_rev;
6131
6132         strscpy(audio_info->display_name,
6133                 edid_caps->display_name,
6134                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
6135
6136         if (cea_revision >= 3) {
6137                 audio_info->mode_count = edid_caps->audio_mode_count;
6138
6139                 for (i = 0; i < audio_info->mode_count; ++i) {
6140                         audio_info->modes[i].format_code =
6141                                         (enum audio_format_code)
6142                                         (edid_caps->audio_modes[i].format_code);
6143                         audio_info->modes[i].channel_count =
6144                                         edid_caps->audio_modes[i].channel_count;
6145                         audio_info->modes[i].sample_rates.all =
6146                                         edid_caps->audio_modes[i].sample_rate;
6147                         audio_info->modes[i].sample_size =
6148                                         edid_caps->audio_modes[i].sample_size;
6149                 }
6150         }
6151
6152         audio_info->flags.all = edid_caps->speaker_flags;
6153
6154         /* TODO: We only check for progressive mode; check for interlaced mode too */
6155         if (drm_connector->latency_present[0]) {
6156                 audio_info->video_latency = drm_connector->video_latency[0];
6157                 audio_info->audio_latency = drm_connector->audio_latency[0];
6158         }
6159
6160         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6162 }
6163
6164 static void
6165 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6166                                       struct drm_display_mode *dst_mode)
6167 {
6168         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6169         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6170         dst_mode->crtc_clock = src_mode->crtc_clock;
6171         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6172         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6173         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
6174         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6175         dst_mode->crtc_htotal = src_mode->crtc_htotal;
6176         dst_mode->crtc_hskew = src_mode->crtc_hskew;
6177         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6178         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6179         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6180         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6181         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6182 }
6183
6184 static void
6185 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6186                                         const struct drm_display_mode *native_mode,
6187                                         bool scale_enabled)
6188 {
6189         if (scale_enabled) {
6190                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6191         } else if (native_mode->clock == drm_mode->clock &&
6192                         native_mode->htotal == drm_mode->htotal &&
6193                         native_mode->vtotal == drm_mode->vtotal) {
6194                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6195         } else {
6196                 /* no scaling and no amdgpu-inserted mode; nothing to patch */
6197         }
6198 }
6199
6200 static struct dc_sink *
6201 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6202 {
6203         struct dc_sink_init_data sink_init_data = { 0 };
6204         struct dc_sink *sink = NULL;
6205         sink_init_data.link = aconnector->dc_link;
6206         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6207
6208         sink = dc_sink_create(&sink_init_data);
6209         if (!sink) {
6210                 DRM_ERROR("Failed to create sink!\n");
6211                 return NULL;
6212         }
6213         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6214
6215         return sink;
6216 }
6217
6218 static void set_multisync_trigger_params(
6219                 struct dc_stream_state *stream)
6220 {
6221         struct dc_stream_state *master = NULL;
6222
6223         if (stream->triggered_crtc_reset.enabled) {
6224                 master = stream->triggered_crtc_reset.event_source;
6225                 stream->triggered_crtc_reset.event =
6226                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6227                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6228                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6229         }
6230 }
6231
6232 static void set_master_stream(struct dc_stream_state *stream_set[],
6233                               int stream_count)
6234 {
6235         int j, highest_rfr = 0, master_stream = 0;
6236
6237         for (j = 0;  j < stream_count; j++) {
6238                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6239                         int refresh_rate = 0;
6240
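                        /*
                         * Refresh rate in Hz, e.g. 1080p at 148.5 MHz:
                         * (1485000 * 100) / (2200 * 1125) = 60.
                         */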
6241                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
6242                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6243                         if (refresh_rate > highest_rfr) {
6244                                 highest_rfr = refresh_rate;
6245                                 master_stream = j;
6246                         }
6247                 }
6248         }
6249         for (j = 0;  j < stream_count; j++) {
6250                 if (stream_set[j])
6251                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6252         }
6253 }
6254
6255 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6256 {
6257         int i = 0;
6258         struct dc_stream_state *stream;
6259
6260         if (context->stream_count < 2)
6261                 return;
6262         for (i = 0; i < context->stream_count; i++) {
6263                 if (!context->streams[i])
6264                         continue;
6265                 /*
6266                  * TODO: add a function to read AMD VSDB bits and set
6267                  * crtc_sync_master.multi_sync_enabled flag
6268                  * For now it's set to false
6269                  */
6270         }
6271
6272         set_master_stream(context->streams, context->stream_count);
6273
6274         for (i = 0; i < context->stream_count; i++) {
6275                 stream = context->streams[i];
6276
6277                 if (!stream)
6278                         continue;
6279
6280                 set_multisync_trigger_params(stream);
6281         }
6282 }
6283
6284 #if defined(CONFIG_DRM_AMD_DC_DCN)
6285 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6286                                                         struct dc_sink *sink, struct dc_stream_state *stream,
6287                                                         struct dsc_dec_dpcd_caps *dsc_caps)
6288 {
6289         stream->timing.flags.DSC = 0;
6290         dsc_caps->is_dsc_supported = false;
6291
6292         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6293                 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6294                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6295                         sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6296                         dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6297                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6298                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6299                                 dsc_caps);
6300         }
6301 }
6302
6303 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6304                                     struct dc_sink *sink, struct dc_stream_state *stream,
6305                                     struct dsc_dec_dpcd_caps *dsc_caps,
6306                                     uint32_t max_dsc_target_bpp_limit_override)
6307 {
6308         const struct dc_link_settings *verified_link_cap = NULL;
6309         uint32_t link_bw_in_kbps;
6310         uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6311         struct dc *dc = sink->ctx->dc;
6312         struct dc_dsc_bw_range bw_range = {0};
6313         struct dc_dsc_config dsc_cfg = {0};
6314
6315         verified_link_cap = dc_link_get_link_cap(stream->link);
6316         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6317         edp_min_bpp_x16 = 8 * 16;
6318         edp_max_bpp_x16 = 8 * 16;
6319
6320         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6321                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6322
6323         if (edp_max_bpp_x16 < edp_min_bpp_x16)
6324                 edp_min_bpp_x16 = edp_max_bpp_x16;
6325
6326         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6327                                 dc->debug.dsc_min_slice_height_override,
6328                                 edp_min_bpp_x16, edp_max_bpp_x16,
6329                                 dsc_caps,
6330                                 &stream->timing,
6331                                 &bw_range)) {
6332
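                /*
                 * The link can carry even the least-compressed stream
                 * (bw_range.max_kbps), so compute a DSC config and pin it at
                 * the highest allowed bits-per-pixel.
                 */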
6333                 if (bw_range.max_kbps < link_bw_in_kbps) {
6334                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6335                                         dsc_caps,
6336                                         dc->debug.dsc_min_slice_height_override,
6337                                         max_dsc_target_bpp_limit_override,
6338                                         0,
6339                                         &stream->timing,
6340                                         &dsc_cfg)) {
6341                                 stream->timing.dsc_cfg = dsc_cfg;
6342                                 stream->timing.flags.DSC = 1;
6343                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6344                         }
6345                         return;
6346                 }
6347         }
6348
6349         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6350                                 dsc_caps,
6351                                 dc->debug.dsc_min_slice_height_override,
6352                                 max_dsc_target_bpp_limit_override,
6353                                 link_bw_in_kbps,
6354                                 &stream->timing,
6355                                 &dsc_cfg)) {
6356                 stream->timing.dsc_cfg = dsc_cfg;
6357                 stream->timing.flags.DSC = 1;
6358         }
6359 }
6360
6361 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6362                                                                                 struct dc_sink *sink, struct dc_stream_state *stream,
6363                                                                                 struct dsc_dec_dpcd_caps *dsc_caps)
6364 {
6365         struct drm_connector *drm_connector = &aconnector->base;
6366         uint32_t link_bandwidth_kbps;
6367         uint32_t max_dsc_target_bpp_limit_override = 0;
6368         struct dc *dc = sink->ctx->dc;
6369         uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6370         uint32_t dsc_max_supported_bw_in_kbps;
6371
6372         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6373                                                         dc_link_get_link_cap(aconnector->dc_link));
6374
6375         if (stream->link && stream->link->local_sink)
6376                 max_dsc_target_bpp_limit_override =
6377                         stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6378
6379         /* Set DSC policy according to dsc_clock_en */
6380         dc_dsc_policy_set_enable_dsc_when_not_needed(
6381                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6382
6383         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6384             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6385
6386                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6387
6388         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6389                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6390                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6391                                                 dsc_caps,
6392                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6393                                                 max_dsc_target_bpp_limit_override,
6394                                                 link_bandwidth_kbps,
6395                                                 &stream->timing,
6396                                                 &stream->timing.dsc_cfg)) {
6397                                 stream->timing.flags.DSC = 1;
6398                                 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6399                                                                  __func__, drm_connector->name);
6400                         }
6401                 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6402                         timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6403                         max_supported_bw_in_kbps = link_bandwidth_kbps;
6404                         dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6405
6406                         if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6407                                         max_supported_bw_in_kbps > 0 &&
6408                                         dsc_max_supported_bw_in_kbps > 0)
6409                                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6410                                                 dsc_caps,
6411                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6412                                                 max_dsc_target_bpp_limit_override,
6413                                                 dsc_max_supported_bw_in_kbps,
6414                                                 &stream->timing,
6415                                                 &stream->timing.dsc_cfg)) {
6416                                         stream->timing.flags.DSC = 1;
6417                                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6418                                                                          __func__, drm_connector->name);
6419                                 }
6420                 }
6421         }
6422
6423         /* Overwrite the stream flag if DSC is enabled through debugfs */
6424         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6425                 stream->timing.flags.DSC = 1;
6426
6427         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6428                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6429
6430         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6431                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6432
6433         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6434                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6435 }
6436 #endif /* CONFIG_DRM_AMD_DC_DCN */
6437
6438 /**
6439  * DOC: FreeSync Video
6440  *
6441  * When a userspace application wants to play a video, the content follows a
6442  * standard format definition that usually specifies the FPS for that format.
6443  * The list below illustrates some video formats and their expected FPS:
6445  *
6446  * - TV/NTSC (23.976 FPS)
6447  * - Cinema (24 FPS)
6448  * - TV/PAL (25 FPS)
6449  * - TV/NTSC (29.97 FPS)
6450  * - TV/NTSC (30 FPS)
6451  * - Cinema HFR (48 FPS)
6452  * - TV/PAL (50 FPS)
6453  * - Commonly used (60 FPS)
6454  * - Multiples of 24 (48,72,96,120 FPS)
6455  *
6456  * The list of standard video formats is not huge, so these modes can be
6457  * added to the connector's modeset list beforehand. With that, userspace
6458  * can leverage FreeSync to extend the front porch to attain the target
6459  * rate. Such a switch will happen seamlessly, without screen blanking or
6460  * reprogramming of the output in any other way. If the userspace requests a
6461  * modesetting change compatible with FreeSync modes that only differ in the
6462  * refresh rate, DC will skip the full update and avoid blink during the
6463  * transition. For example, the video player can change the modesetting from
6464  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6465  * causing any display blink. This same concept can be applied to a mode
6466  * setting change.
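 *
 * As an illustrative example, a CEA 1080p60 base mode (148.5 MHz pixel
 * clock, 2200x1125 total) can be driven at ~48 Hz by stretching the
 * vertical front porch until vtotal reaches 1406, since
 * 148500000 / (2200 * 1406) is roughly 48; the pixel clock and every other
 * timing parameter stay the same, which is what makes the switch seamless.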
6467  */
6468 static struct drm_display_mode *
6469 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6470                           bool use_probed_modes)
6471 {
6472         struct drm_display_mode *m, *m_pref = NULL;
6473         u16 current_refresh, highest_refresh;
6474         struct list_head *list_head = use_probed_modes ?
6475                                                     &aconnector->base.probed_modes :
6476                                                     &aconnector->base.modes;
6477
6478         if (aconnector->freesync_vid_base.clock != 0)
6479                 return &aconnector->freesync_vid_base;
6480
6481         /* Find the preferred mode */
6482         list_for_each_entry(m, list_head, head) {
6483                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6484                         m_pref = m;
6485                         break;
6486                 }
6487         }
6488
6489         if (!m_pref) {
6490                 /* Probably an EDID with no preferred mode. Fall back to the first entry */
6491                 m_pref = list_first_entry_or_null(
6492                         &aconnector->base.modes, struct drm_display_mode, head);
6493                 if (!m_pref) {
6494                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6495                         return NULL;
6496                 }
6497         }
6498
6499         highest_refresh = drm_mode_vrefresh(m_pref);
6500
6501         /*
6502          * Find the mode with highest refresh rate with same resolution.
6503          * For some monitors, preferred mode is not the mode with highest
6504          * supported refresh rate.
6505          */
6506         list_for_each_entry(m, list_head, head) {
6507                 current_refresh = drm_mode_vrefresh(m);
6508
6509                 if (m->hdisplay == m_pref->hdisplay &&
6510                     m->vdisplay == m_pref->vdisplay &&
6511                     highest_refresh < current_refresh) {
6512                         highest_refresh = current_refresh;
6513                         m_pref = m;
6514                 }
6515         }
6516
6517         drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6518         return m_pref;
6519 }
6520
6521 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6522                                    struct amdgpu_dm_connector *aconnector)
6523 {
6524         struct drm_display_mode *high_mode;
6525         int timing_diff;
6526
6527         high_mode = get_highest_refresh_rate_mode(aconnector, false);
6528         if (!high_mode || !mode)
6529                 return false;
6530
6531         timing_diff = high_mode->vtotal - mode->vtotal;
6532
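	/*
	 * A FreeSync video mode must differ from the base mode only in the
	 * vertical front porch: the pixel clock, all horizontal timing
	 * parameters and the vertical active area must match, and vsync must
	 * shift by exactly the vtotal difference.
	 */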
6533         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6534             high_mode->hdisplay != mode->hdisplay ||
6535             high_mode->vdisplay != mode->vdisplay ||
6536             high_mode->hsync_start != mode->hsync_start ||
6537             high_mode->hsync_end != mode->hsync_end ||
6538             high_mode->htotal != mode->htotal ||
6539             high_mode->hskew != mode->hskew ||
6540             high_mode->vscan != mode->vscan ||
6541             high_mode->vsync_start - mode->vsync_start != timing_diff ||
6542             high_mode->vsync_end - mode->vsync_end != timing_diff)
		return false;

	return true;
6546 }
6547
6548 static struct dc_stream_state *
6549 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6550                        const struct drm_display_mode *drm_mode,
6551                        const struct dm_connector_state *dm_state,
6552                        const struct dc_stream_state *old_stream,
6553                        int requested_bpc)
6554 {
6555         struct drm_display_mode *preferred_mode = NULL;
6556         struct drm_connector *drm_connector;
6557         const struct drm_connector_state *con_state =
6558                 dm_state ? &dm_state->base : NULL;
6559         struct dc_stream_state *stream = NULL;
6560         struct drm_display_mode mode = *drm_mode;
6561         struct drm_display_mode saved_mode;
6562         struct drm_display_mode *freesync_mode = NULL;
6563         bool native_mode_found = false;
6564         bool recalculate_timing = false;
6565         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6566         int mode_refresh;
6567         int preferred_refresh = 0;
6568 #if defined(CONFIG_DRM_AMD_DC_DCN)
6569         struct dsc_dec_dpcd_caps dsc_caps;
6570 #endif
6571         struct dc_sink *sink = NULL;
6572
6573         memset(&saved_mode, 0, sizeof(saved_mode));
6574
6575         if (aconnector == NULL) {
6576                 DRM_ERROR("aconnector is NULL!\n");
6577                 return stream;
6578         }
6579
6580         drm_connector = &aconnector->base;
6581
6582         if (!aconnector->dc_sink) {
6583                 sink = create_fake_sink(aconnector);
6584                 if (!sink)
6585                         return stream;
6586         } else {
6587                 sink = aconnector->dc_sink;
6588                 dc_sink_retain(sink);
6589         }
6590
6591         stream = dc_create_stream_for_sink(sink);
6592
6593         if (stream == NULL) {
6594                 DRM_ERROR("Failed to create stream for sink!\n");
6595                 goto finish;
6596         }
6597
6598         stream->dm_stream_context = aconnector;
6599
6600         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6601                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6602
6603         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6604                 /* Search for preferred mode */
6605                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6606                         native_mode_found = true;
6607                         break;
6608                 }
6609         }
6610         if (!native_mode_found)
6611                 preferred_mode = list_first_entry_or_null(
6612                                 &aconnector->base.modes,
6613                                 struct drm_display_mode,
6614                                 head);
6615
6616         mode_refresh = drm_mode_vrefresh(&mode);
6617
6618         if (preferred_mode == NULL) {
		/*
		 * This may not be an error: the use case is when we have no
		 * usermode calls to reset and set the mode upon hotplug. In
		 * this case, we call set mode ourselves to restore the
		 * previous mode, and the mode list may not be filled in yet.
		 */
6625                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6626         } else {
6627                 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6628                 if (recalculate_timing) {
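			/*
			 * Use the FreeSync video base mode (the one with the
			 * highest refresh rate) for the stream timing, and
			 * stash the requested mode in saved_mode so its CRTC
			 * info can still be derived below.
			 */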
6629                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6630                         drm_mode_copy(&saved_mode, &mode);
6631                         drm_mode_copy(&mode, freesync_mode);
6632                 } else {
6633                         decide_crtc_timing_for_drm_display_mode(
6634                                 &mode, preferred_mode, scale);
6635
6636                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6637                 }
6638         }
6639
6640         if (recalculate_timing)
6641                 drm_mode_set_crtcinfo(&saved_mode, 0);
6642         else if (!dm_state)
6643                 drm_mode_set_crtcinfo(&mode, 0);
6644
	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * we copy the vic and polarities of the old timings.
	 */
6649         if (!scale || mode_refresh != preferred_refresh)
6650                 fill_stream_properties_from_drm_display_mode(
6651                         stream, &mode, &aconnector->base, con_state, NULL,
6652                         requested_bpc);
6653         else
6654                 fill_stream_properties_from_drm_display_mode(
6655                         stream, &mode, &aconnector->base, con_state, old_stream,
6656                         requested_bpc);
6657
6658 #if defined(CONFIG_DRM_AMD_DC_DCN)
6659         /* SST DSC determination policy */
6660         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6661         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6662                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6663 #endif
6664
6665         update_stream_scaling_settings(&mode, dm_state, stream);
6666
6667         fill_audio_info(
6668                 &stream->audio_info,
6669                 drm_connector,
6670                 sink);
6671
6672         update_stream_signal(stream, sink);
6673
6674         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6675                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6676
6677         if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports VSC SDP colorimetry
		 * before building the VSC info packet.
		 */
6682                 stream->use_vsc_sdp_for_colorimetry = false;
6683                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6684                         stream->use_vsc_sdp_for_colorimetry =
6685                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6686                 } else {
6687                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6688                                 stream->use_vsc_sdp_for_colorimetry = true;
6689                 }
6690                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6691                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6692
6693         }
6694 finish:
6695         dc_sink_release(sink);
6696
6697         return stream;
6698 }
6699
6700 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6701 {
6702         drm_crtc_cleanup(crtc);
6703         kfree(crtc);
6704 }
6705
6706 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6707                                   struct drm_crtc_state *state)
6708 {
6709         struct dm_crtc_state *cur = to_dm_crtc_state(state);
6710
	/* TODO: Destroy dc_stream once the stream object is flattened */
6712         if (cur->stream)
6713                 dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
6720 }
6721
6722 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6723 {
6724         struct dm_crtc_state *state;
6725
6726         if (crtc->state)
6727                 dm_crtc_destroy_state(crtc, crtc->state);
6728
6729         state = kzalloc(sizeof(*state), GFP_KERNEL);
6730         if (WARN_ON(!state))
6731                 return;
6732
6733         __drm_atomic_helper_crtc_reset(crtc, &state->base);
6734 }
6735
6736 static struct drm_crtc_state *
6737 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6738 {
6739         struct dm_crtc_state *state, *cur;
6740
	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
6745
6746         state = kzalloc(sizeof(*state), GFP_KERNEL);
6747         if (!state)
6748                 return NULL;
6749
6750         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6751
6752         if (cur->stream) {
6753                 state->stream = cur->stream;
6754                 dc_stream_retain(state->stream);
6755         }
6756
6757         state->active_planes = cur->active_planes;
6758         state->vrr_infopacket = cur->vrr_infopacket;
6759         state->abm_level = cur->abm_level;
6760         state->vrr_supported = cur->vrr_supported;
6761         state->freesync_config = cur->freesync_config;
6762         state->cm_has_degamma = cur->cm_has_degamma;
6763         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6764         state->mpo_requested = cur->mpo_requested;
	/* TODO: Duplicate dc_stream once the stream object is flattened */
6766
6767         return &state->base;
6768 }
6769
6770 #ifdef CONFIG_DEBUG_FS
6771 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6772 {
6773         crtc_debugfs_init(crtc);
6774
6775         return 0;
6776 }
6777 #endif
6778
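/*
 * Enable or disable the VUPDATE interrupt for a CRTC. VUPDATE is only
 * needed while VRR is active; see dm_set_vblank() below.
 */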
6779 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6780 {
6781         enum dc_irq_source irq_source;
6782         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6783         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6784         int rc;
6785
6786         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6787
6788         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6789
6790         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6791                       acrtc->crtc_id, enable ? "en" : "dis", rc);
6792         return rc;
6793 }
6794
6795 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6796 {
6797         enum dc_irq_source irq_source;
6798         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6799         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6800         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6801         struct amdgpu_display_manager *dm = &adev->dm;
6802         struct vblank_control_work *work;
6803         int rc = 0;
6804
6805         if (enable) {
6806                 /* vblank irq on -> Only need vupdate irq in vrr mode */
6807                 if (amdgpu_dm_vrr_active(acrtc_state))
6808                         rc = dm_set_vupdate_irq(crtc, true);
6809         } else {
6810                 /* vblank irq off -> vupdate irq off */
6811                 rc = dm_set_vupdate_irq(crtc, false);
6812         }
6813
6814         if (rc)
6815                 return rc;
6816
6817         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6818
6819         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6820                 return -EBUSY;
6821
6822         if (amdgpu_in_reset(adev))
6823                 return 0;
6824
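	/*
	 * Defer stream-dependent work (e.g. PSR control) to the vblank
	 * control worker. GFP_ATOMIC is used since this path may be reached
	 * from atomic context.
	 */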
6825         if (dm->vblank_control_workqueue) {
6826                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6827                 if (!work)
6828                         return -ENOMEM;
6829
6830                 INIT_WORK(&work->work, vblank_control_worker);
6831                 work->dm = dm;
6832                 work->acrtc = acrtc;
6833                 work->enable = enable;
6834
6835                 if (acrtc_state->stream) {
6836                         dc_stream_retain(acrtc_state->stream);
6837                         work->stream = acrtc_state->stream;
6838                 }
6839
6840                 queue_work(dm->vblank_control_workqueue, &work->work);
6841         }
6842
6843         return 0;
6844 }
6845
6846 static int dm_enable_vblank(struct drm_crtc *crtc)
6847 {
6848         return dm_set_vblank(crtc, true);
6849 }
6850
6851 static void dm_disable_vblank(struct drm_crtc *crtc)
6852 {
6853         dm_set_vblank(crtc, false);
6854 }
6855
6856 /* Implemented only the options currently available for the driver */
6857 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6858         .reset = dm_crtc_reset_state,
6859         .destroy = amdgpu_dm_crtc_destroy,
6860         .set_config = drm_atomic_helper_set_config,
6861         .page_flip = drm_atomic_helper_page_flip,
6862         .atomic_duplicate_state = dm_crtc_duplicate_state,
6863         .atomic_destroy_state = dm_crtc_destroy_state,
6864         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6865         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6866         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6867         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6868         .enable_vblank = dm_enable_vblank,
6869         .disable_vblank = dm_disable_vblank,
6870         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6871 #if defined(CONFIG_DEBUG_FS)
6872         .late_register = amdgpu_dm_crtc_late_register,
6873 #endif
6874 };
6875
6876 static enum drm_connector_status
6877 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6878 {
6879         bool connected;
6880         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6881
6882         /*
6883          * Notes:
6884          * 1. This interface is NOT called in context of HPD irq.
6885          * 2. This interface *is called* in context of user-mode ioctl. Which
6886          * makes it a bad place for *any* MST-related activity.
6887          */
6888
6889         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6890             !aconnector->fake_enable)
6891                 connected = (aconnector->dc_sink != NULL);
6892         else
6893                 connected = (aconnector->base.force == DRM_FORCE_ON);
6894
6895         update_subconnector_property(aconnector);
6896
6897         return (connected ? connector_status_connected :
6898                         connector_status_disconnected);
6899 }
6900
6901 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6902                                             struct drm_connector_state *connector_state,
6903                                             struct drm_property *property,
6904                                             uint64_t val)
6905 {
6906         struct drm_device *dev = connector->dev;
6907         struct amdgpu_device *adev = drm_to_adev(dev);
6908         struct dm_connector_state *dm_old_state =
6909                 to_dm_connector_state(connector->state);
6910         struct dm_connector_state *dm_new_state =
6911                 to_dm_connector_state(connector_state);
6912
6913         int ret = -EINVAL;
6914
6915         if (property == dev->mode_config.scaling_mode_property) {
6916                 enum amdgpu_rmx_type rmx_type;
6917
6918                 switch (val) {
6919                 case DRM_MODE_SCALE_CENTER:
6920                         rmx_type = RMX_CENTER;
6921                         break;
6922                 case DRM_MODE_SCALE_ASPECT:
6923                         rmx_type = RMX_ASPECT;
6924                         break;
6925                 case DRM_MODE_SCALE_FULLSCREEN:
6926                         rmx_type = RMX_FULL;
6927                         break;
6928                 case DRM_MODE_SCALE_NONE:
6929                 default:
6930                         rmx_type = RMX_OFF;
6931                         break;
6932                 }
6933
6934                 if (dm_old_state->scaling == rmx_type)
6935                         return 0;
6936
6937                 dm_new_state->scaling = rmx_type;
6938                 ret = 0;
6939         } else if (property == adev->mode_info.underscan_hborder_property) {
6940                 dm_new_state->underscan_hborder = val;
6941                 ret = 0;
6942         } else if (property == adev->mode_info.underscan_vborder_property) {
6943                 dm_new_state->underscan_vborder = val;
6944                 ret = 0;
6945         } else if (property == adev->mode_info.underscan_property) {
6946                 dm_new_state->underscan_enable = val;
6947                 ret = 0;
6948         } else if (property == adev->mode_info.abm_level_property) {
6949                 dm_new_state->abm_level = val;
6950                 ret = 0;
6951         }
6952
6953         return ret;
6954 }
6955
6956 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6957                                             const struct drm_connector_state *state,
6958                                             struct drm_property *property,
6959                                             uint64_t *val)
6960 {
6961         struct drm_device *dev = connector->dev;
6962         struct amdgpu_device *adev = drm_to_adev(dev);
6963         struct dm_connector_state *dm_state =
6964                 to_dm_connector_state(state);
6965         int ret = -EINVAL;
6966
6967         if (property == dev->mode_config.scaling_mode_property) {
6968                 switch (dm_state->scaling) {
6969                 case RMX_CENTER:
6970                         *val = DRM_MODE_SCALE_CENTER;
6971                         break;
6972                 case RMX_ASPECT:
6973                         *val = DRM_MODE_SCALE_ASPECT;
6974                         break;
6975                 case RMX_FULL:
6976                         *val = DRM_MODE_SCALE_FULLSCREEN;
6977                         break;
6978                 case RMX_OFF:
6979                 default:
6980                         *val = DRM_MODE_SCALE_NONE;
6981                         break;
6982                 }
6983                 ret = 0;
6984         } else if (property == adev->mode_info.underscan_hborder_property) {
6985                 *val = dm_state->underscan_hborder;
6986                 ret = 0;
6987         } else if (property == adev->mode_info.underscan_vborder_property) {
6988                 *val = dm_state->underscan_vborder;
6989                 ret = 0;
6990         } else if (property == adev->mode_info.underscan_property) {
6991                 *val = dm_state->underscan_enable;
6992                 ret = 0;
6993         } else if (property == adev->mode_info.abm_level_property) {
6994                 *val = dm_state->abm_level;
6995                 ret = 0;
6996         }
6997
6998         return ret;
6999 }
7000
7001 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
7002 {
7003         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
7004
7005         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
7006 }
7007
7008 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
7009 {
7010         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7011         const struct dc_link *link = aconnector->dc_link;
7012         struct amdgpu_device *adev = drm_to_adev(connector->dev);
7013         struct amdgpu_display_manager *dm = &adev->dm;
7014         int i;
7015
	/*
	 * Call only if mst_mgr was initialized before, since it's not done
	 * for all connector types.
	 */
7020         if (aconnector->mst_mgr.dev)
7021                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
7022
7023         for (i = 0; i < dm->num_of_edps; i++) {
7024                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
7025                         backlight_device_unregister(dm->backlight_dev[i]);
7026                         dm->backlight_dev[i] = NULL;
7027                 }
7028         }
7029
7030         if (aconnector->dc_em_sink)
7031                 dc_sink_release(aconnector->dc_em_sink);
7032         aconnector->dc_em_sink = NULL;
7033         if (aconnector->dc_sink)
7034                 dc_sink_release(aconnector->dc_sink);
7035         aconnector->dc_sink = NULL;
7036
7037         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
7038         drm_connector_unregister(connector);
7039         drm_connector_cleanup(connector);
7040         if (aconnector->i2c) {
7041                 i2c_del_adapter(&aconnector->i2c->base);
7042                 kfree(aconnector->i2c);
7043         }
7044         kfree(aconnector->dm_dp_aux.aux.name);
7045
7046         kfree(connector);
7047 }
7048
7049 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
7050 {
7051         struct dm_connector_state *state =
7052                 to_dm_connector_state(connector->state);
7053
7054         if (connector->state)
7055                 __drm_atomic_helper_connector_destroy_state(connector->state);
7056
7057         kfree(state);
7058
7059         state = kzalloc(sizeof(*state), GFP_KERNEL);
7060
7061         if (state) {
7062                 state->scaling = RMX_OFF;
7063                 state->underscan_enable = false;
7064                 state->underscan_hborder = 0;
7065                 state->underscan_vborder = 0;
7066                 state->base.max_requested_bpc = 8;
7067                 state->vcpi_slots = 0;
7068                 state->pbn = 0;
7069                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
7070                         state->abm_level = amdgpu_dm_abm_level;
7071
7072                 __drm_atomic_helper_connector_reset(connector, &state->base);
7073         }
7074 }
7075
7076 struct drm_connector_state *
7077 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
7078 {
7079         struct dm_connector_state *state =
7080                 to_dm_connector_state(connector->state);
7081
7082         struct dm_connector_state *new_state =
7083                         kmemdup(state, sizeof(*state), GFP_KERNEL);
7084
7085         if (!new_state)
7086                 return NULL;
7087
7088         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
7089
7090         new_state->freesync_capable = state->freesync_capable;
7091         new_state->abm_level = state->abm_level;
7092         new_state->scaling = state->scaling;
7093         new_state->underscan_enable = state->underscan_enable;
7094         new_state->underscan_hborder = state->underscan_hborder;
7095         new_state->underscan_vborder = state->underscan_vborder;
7096         new_state->vcpi_slots = state->vcpi_slots;
7097         new_state->pbn = state->pbn;
7098         return &new_state->base;
7099 }
7100
7101 static int
7102 amdgpu_dm_connector_late_register(struct drm_connector *connector)
7103 {
7104         struct amdgpu_dm_connector *amdgpu_dm_connector =
7105                 to_amdgpu_dm_connector(connector);
7106         int r;
7107
7108         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
7109             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
7110                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
7111                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
7112                 if (r)
7113                         return r;
7114         }
7115
7116 #if defined(CONFIG_DEBUG_FS)
7117         connector_debugfs_init(amdgpu_dm_connector);
7118 #endif
7119
7120         return 0;
7121 }
7122
7123 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
7124         .reset = amdgpu_dm_connector_funcs_reset,
7125         .detect = amdgpu_dm_connector_detect,
7126         .fill_modes = drm_helper_probe_single_connector_modes,
7127         .destroy = amdgpu_dm_connector_destroy,
7128         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
7129         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
7130         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
7131         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
7132         .late_register = amdgpu_dm_connector_late_register,
7133         .early_unregister = amdgpu_dm_connector_unregister
7134 };
7135
7136 static int get_modes(struct drm_connector *connector)
7137 {
7138         return amdgpu_dm_connector_get_modes(connector);
7139 }
7140
7141 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
7142 {
7143         struct dc_sink_init_data init_params = {
7144                         .link = aconnector->dc_link,
7145                         .sink_signal = SIGNAL_TYPE_VIRTUAL
7146         };
7147         struct edid *edid;
7148
7149         if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);
7152
7153                 aconnector->base.force = DRM_FORCE_OFF;
7154                 aconnector->base.override_edid = false;
7155                 return;
7156         }
7157
7158         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7159
7160         aconnector->edid = edid;
7161
7162         aconnector->dc_em_sink = dc_link_add_remote_sink(
7163                 aconnector->dc_link,
7164                 (uint8_t *)edid,
7165                 (edid->extensions + 1) * EDID_LENGTH,
7166                 &init_params);
7167
7168         if (aconnector->base.force == DRM_FORCE_ON) {
7169                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
7170                 aconnector->dc_link->local_sink :
7171                 aconnector->dc_em_sink;
7172                 dc_sink_retain(aconnector->dc_sink);
7173         }
7174 }
7175
7176 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7177 {
7178         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7179
	/*
	 * In case of a headless boot with force on for a DP managed
	 * connector, those settings have to be != 0 to get an initial
	 * modeset.
	 */
7184         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7185                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7186                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7187         }

	aconnector->base.override_edid = true;
7191         create_eml_sink(aconnector);
7192 }
7193
7194 struct dc_stream_state *
7195 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7196                                 const struct drm_display_mode *drm_mode,
7197                                 const struct dm_connector_state *dm_state,
7198                                 const struct dc_stream_state *old_stream)
7199 {
7200         struct drm_connector *connector = &aconnector->base;
7201         struct amdgpu_device *adev = drm_to_adev(connector->dev);
7202         struct dc_stream_state *stream;
7203         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7204         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7205         enum dc_status dc_result = DC_OK;
7206
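
	/*
	 * Validation retry policy: start from the requested bpc and step
	 * down by 2 until DC accepts the stream or 6 bpc is reached; as a
	 * last resort, retry once more with YCbCr420 encoding forced.
	 */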
7207         do {
7208                 stream = create_stream_for_sink(aconnector, drm_mode,
7209                                                 dm_state, old_stream,
7210                                                 requested_bpc);
7211                 if (stream == NULL) {
7212                         DRM_ERROR("Failed to create stream for sink!\n");
7213                         break;
7214                 }
7215
7216                 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
7217                         dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
7218
7219                 if (dc_result == DC_OK)
7220                         dc_result = dc_validate_stream(adev->dm.dc, stream);
7221
7222                 if (dc_result != DC_OK) {
7223                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7224                                       drm_mode->hdisplay,
7225                                       drm_mode->vdisplay,
7226                                       drm_mode->clock,
7227                                       dc_result,
7228                                       dc_status_to_str(dc_result));
7229
7230                         dc_stream_release(stream);
7231                         stream = NULL;
7232                         requested_bpc -= 2; /* lower bpc to retry validation */
7233                 }
7234
7235         } while (stream == NULL && requested_bpc >= 6);
7236
7237         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7238                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7239
7240                 aconnector->force_yuv420_output = true;
7241                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7242                                                 dm_state, old_stream);
7243                 aconnector->force_yuv420_output = false;
7244         }
7245
7246         return stream;
7247 }
7248
7249 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7250                                    struct drm_display_mode *mode)
7251 {
7252         int result = MODE_ERROR;
7253         struct dc_sink *dc_sink;
7254         /* TODO: Unhardcode stream count */
7255         struct dc_stream_state *stream;
7256         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7257
7258         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7259                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7260                 return result;
7261
	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt.
	 */
7266         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7267                 !aconnector->dc_em_sink)
7268                 handle_edid_mgmt(aconnector);
7269
7270         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7271
7272         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7273                                 aconnector->base.force != DRM_FORCE_ON) {
7274                 DRM_ERROR("dc_sink is NULL!\n");
7275                 goto fail;
7276         }
7277
7278         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7279         if (stream) {
7280                 dc_stream_release(stream);
7281                 result = MODE_OK;
7282         }
7283
7284 fail:
	/* TODO: error handling */
7286         return result;
7287 }
7288
7289 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7290                                 struct dc_info_packet *out)
7291 {
7292         struct hdmi_drm_infoframe frame;
7293         unsigned char buf[30]; /* 26 + 4 */
7294         ssize_t len;
7295         int ret, i;
7296
7297         memset(out, 0, sizeof(*out));
7298
7299         if (!state->hdr_output_metadata)
7300                 return 0;
7301
7302         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7303         if (ret)
7304                 return ret;
7305
7306         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7307         if (len < 0)
7308                 return (int)len;
7309
7310         /* Static metadata is a fixed 26 bytes + 4 byte header. */
7311         if (len != 30)
7312                 return -EINVAL;
7313
7314         /* Prepare the infopacket for DC. */
7315         switch (state->connector->connector_type) {
7316         case DRM_MODE_CONNECTOR_HDMIA:
7317                 out->hb0 = 0x87; /* type */
7318                 out->hb1 = 0x01; /* version */
7319                 out->hb2 = 0x1A; /* length */
7320                 out->sb[0] = buf[3]; /* checksum */
7321                 i = 1;
7322                 break;
7323
7324         case DRM_MODE_CONNECTOR_DisplayPort:
7325         case DRM_MODE_CONNECTOR_eDP:
7326                 out->hb0 = 0x00; /* sdp id, zero */
7327                 out->hb1 = 0x87; /* type */
7328                 out->hb2 = 0x1D; /* payload len - 1 */
7329                 out->hb3 = (0x13 << 2); /* sdp version */
7330                 out->sb[0] = 0x01; /* version */
7331                 out->sb[1] = 0x1A; /* length */
7332                 i = 2;
7333                 break;
7334
7335         default:
7336                 return -EINVAL;
7337         }
7338
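	/*
	 * Copy the 26-byte static metadata payload, skipping the 4-byte
	 * infoframe header in buf.
	 */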
7339         memcpy(&out->sb[i], &buf[4], 26);
7340         out->valid = true;
7341
7342         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7343                        sizeof(out->sb), false);
7344
7345         return 0;
7346 }
7347
7348 static int
7349 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7350                                  struct drm_atomic_state *state)
7351 {
7352         struct drm_connector_state *new_con_state =
7353                 drm_atomic_get_new_connector_state(state, conn);
7354         struct drm_connector_state *old_con_state =
7355                 drm_atomic_get_old_connector_state(state, conn);
7356         struct drm_crtc *crtc = new_con_state->crtc;
7357         struct drm_crtc_state *new_crtc_state;
7358         int ret;
7359
7360         trace_amdgpu_dm_connector_atomic_check(new_con_state);
7361
7362         if (!crtc)
7363                 return 0;
7364
7365         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7366                 struct dc_info_packet hdr_infopacket;
7367
7368                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7369                 if (ret)
7370                         return ret;
7371
7372                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7373                 if (IS_ERR(new_crtc_state))
7374                         return PTR_ERR(new_crtc_state);
7375
7376                 /*
7377                  * DC considers the stream backends changed if the
7378                  * static metadata changes. Forcing the modeset also
7379                  * gives a simple way for userspace to switch from
7380                  * 8bpc to 10bpc when setting the metadata to enter
7381                  * or exit HDR.
7382                  *
7383                  * Changing the static metadata after it's been
7384                  * set is permissible, however. So only force a
7385                  * modeset if we're entering or exiting HDR.
7386                  */
7387                 new_crtc_state->mode_changed =
7388                         !old_con_state->hdr_output_metadata ||
7389                         !new_con_state->hdr_output_metadata;
7390         }
7391
7392         return 0;
7393 }
7394
7395 static const struct drm_connector_helper_funcs
7396 amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second, bigger display in FB Con mode, bigger
	 * resolution modes will be filtered by drm_mode_validate_size(), and
	 * those modes are missing after the user starts lightdm. So we need
	 * to renew the modes list in the get_modes callback, not just return
	 * the modes count.
	 */
7403         .get_modes = get_modes,
7404         .mode_valid = amdgpu_dm_connector_mode_valid,
7405         .atomic_check = amdgpu_dm_connector_atomic_check,
7406 };
7407
7408 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7409 {
7410 }
7411
7412 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7413 {
7414         struct drm_atomic_state *state = new_crtc_state->state;
7415         struct drm_plane *plane;
7416         int num_active = 0;
7417
7418         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7419                 struct drm_plane_state *new_plane_state;
7420
7421                 /* Cursor planes are "fake". */
7422                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7423                         continue;
7424
7425                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7426
7427                 if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
7433                         num_active += 1;
7434                         continue;
7435                 }
7436
7437                 /* We need a framebuffer to be considered enabled. */
7438                 num_active += (new_plane_state->fb != NULL);
7439         }
7440
7441         return num_active;
7442 }
7443
7444 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7445                                          struct drm_crtc_state *new_crtc_state)
7446 {
7447         struct dm_crtc_state *dm_new_crtc_state =
7448                 to_dm_crtc_state(new_crtc_state);
7449
7450         dm_new_crtc_state->active_planes = 0;
7451
7452         if (!dm_new_crtc_state->stream)
7453                 return;
7454
7455         dm_new_crtc_state->active_planes =
7456                 count_crtc_active_planes(new_crtc_state);
7457 }
7458
7459 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7460                                        struct drm_atomic_state *state)
7461 {
7462         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7463                                                                           crtc);
7464         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7465         struct dc *dc = adev->dm.dc;
7466         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7467         int ret = -EINVAL;
7468
7469         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7470
7471         dm_update_crtc_active_planes(crtc, crtc_state);
7472
7473         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7474                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7475                 return ret;
7476         }
7477
7478         /*
7479          * We require the primary plane to be enabled whenever the CRTC is, otherwise
7480          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7481          * planes are disabled, which is not supported by the hardware. And there is legacy
7482          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7483          */
7484         if (crtc_state->enable &&
7485             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7486                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7487                 return -EINVAL;
7488         }
7489
7490         /* In some use cases, like reset, no stream is attached */
7491         if (!dm_crtc_state->stream)
7492                 return 0;
7493
7494         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7495                 return 0;
7496
7497         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7498         return ret;
7499 }
7500
7501 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7502                                       const struct drm_display_mode *mode,
7503                                       struct drm_display_mode *adjusted_mode)
7504 {
7505         return true;
7506 }
7507
7508 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7509         .disable = dm_crtc_helper_disable,
7510         .atomic_check = dm_crtc_helper_atomic_check,
7511         .mode_fixup = dm_crtc_helper_mode_fixup,
7512         .get_scanout_position = amdgpu_crtc_get_scanout_position,
7513 };
7514
7515 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
7519
7520 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7521 {
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
7539 }
7540
7541 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7542                                           struct drm_crtc_state *crtc_state,
7543                                           struct drm_connector_state *conn_state)
7544 {
7545         struct drm_atomic_state *state = crtc_state->state;
7546         struct drm_connector *connector = conn_state->connector;
7547         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7548         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7549         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7550         struct drm_dp_mst_topology_mgr *mst_mgr;
7551         struct drm_dp_mst_port *mst_port;
7552         enum dc_color_depth color_depth;
7553         int clock, bpp = 0;
7554         bool is_y420 = false;
7555
7556         if (!aconnector->port || !aconnector->dc_sink)
7557                 return 0;
7558
7559         mst_port = aconnector->port;
7560         mst_mgr = &aconnector->mst_port->mst_mgr;
7561
7562         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7563                 return 0;
7564
7565         if (!state->duplicated) {
7566                 int max_bpc = conn_state->max_requested_bpc;
7567                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7568                                 aconnector->force_yuv420_output;
7569                 color_depth = convert_color_depth_from_display_info(connector,
7570                                                                     is_y420,
7571                                                                     max_bpc);
7572                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7573                 clock = adjusted_mode->clock;
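		/*
		 * PBN (Payload Bandwidth Number) is the unit DP MST uses to
		 * account for link bandwidth; it is derived from the pixel
		 * clock and the stream's bits per pixel.
		 */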
7574                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7575         }
7576         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7577                                                                            mst_mgr,
7578                                                                            mst_port,
7579                                                                            dm_new_connector_state->pbn,
7580                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
7581         if (dm_new_connector_state->vcpi_slots < 0) {
7582                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7583                 return dm_new_connector_state->vcpi_slots;
7584         }
7585         return 0;
7586 }
7587
7588 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7589         .disable = dm_encoder_helper_disable,
7590         .atomic_check = dm_encoder_helper_atomic_check
7591 };
7592
7593 #if defined(CONFIG_DRM_AMD_DC_DCN)
7594 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7595                                             struct dc_state *dc_state,
7596                                             struct dsc_mst_fairness_vars *vars)
7597 {
7598         struct dc_stream_state *stream = NULL;
7599         struct drm_connector *connector;
7600         struct drm_connector_state *new_con_state;
7601         struct amdgpu_dm_connector *aconnector;
7602         struct dm_connector_state *dm_conn_state;
7603         int i, j;
7604         int vcpi, pbn_div, pbn, slot_num = 0;
7605
7606         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7607
7608                 aconnector = to_amdgpu_dm_connector(connector);
7609
7610                 if (!aconnector->port)
7611                         continue;
7612
7613                 if (!new_con_state || !new_con_state->crtc)
7614                         continue;
7615
7616                 dm_conn_state = to_dm_connector_state(new_con_state);
7617
7618                 for (j = 0; j < dc_state->stream_count; j++) {
7619                         stream = dc_state->streams[j];
7620                         if (!stream)
7621                                 continue;
7622
7623                         if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7624                                 break;
7625
7626                         stream = NULL;
7627                 }
7628
7629                 if (!stream)
7630                         continue;
7631
7632                 pbn_div = dm_mst_get_pbn_divider(stream->link);
		/* pbn is calculated by compute_mst_dsc_configs_for_state */
7634                 for (j = 0; j < dc_state->stream_count; j++) {
7635                         if (vars[j].aconnector == aconnector) {
7636                                 pbn = vars[j].pbn;
7637                                 break;
7638                         }
7639                 }
7640
7641                 if (j == dc_state->stream_count)
7642                         continue;
7643
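		/* Convert the stream's PBN into MST time slots, rounding up. */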
7644                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7645
7646                 if (stream->timing.flags.DSC != 1) {
7647                         dm_conn_state->pbn = pbn;
7648                         dm_conn_state->vcpi_slots = slot_num;
7649
7650                         drm_dp_mst_atomic_enable_dsc(state,
7651                                                      aconnector->port,
7652                                                      dm_conn_state->pbn,
7653                                                      0,
7654                                                      false);
7655                         continue;
7656                 }
7657
7658                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7659                                                     aconnector->port,
7660                                                     pbn, pbn_div,
7661                                                     true);
7662                 if (vcpi < 0)
7663                         return vcpi;
7664
7665                 dm_conn_state->pbn = pbn;
7666                 dm_conn_state->vcpi_slots = vcpi;
7667         }
7668         return 0;
7669 }
7670 #endif
7671
7672 static void dm_drm_plane_reset(struct drm_plane *plane)
7673 {
7674         struct dm_plane_state *amdgpu_state = NULL;
7675
7676         if (plane->state)
7677                 plane->funcs->atomic_destroy_state(plane, plane->state);
7678
7679         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7680         WARN_ON(amdgpu_state == NULL);
7681
7682         if (amdgpu_state)
7683                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7684 }
7685
7686 static struct drm_plane_state *
7687 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7688 {
7689         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7690
7691         old_dm_plane_state = to_dm_plane_state(plane->state);
7692         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7693         if (!dm_plane_state)
7694                 return NULL;
7695
7696         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7697
7698         if (old_dm_plane_state->dc_state) {
7699                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7700                 dc_plane_state_retain(dm_plane_state->dc_state);
7701         }
7702
7703         return &dm_plane_state->base;
7704 }
7705
7706 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7707                                 struct drm_plane_state *state)
7708 {
7709         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7710
7711         if (dm_plane_state->dc_state)
7712                 dc_plane_state_release(dm_plane_state->dc_state);
7713
7714         drm_atomic_helper_plane_destroy_state(plane, state);
7715 }
7716
7717 static const struct drm_plane_funcs dm_plane_funcs = {
7718         .update_plane   = drm_atomic_helper_update_plane,
7719         .disable_plane  = drm_atomic_helper_disable_plane,
7720         .destroy        = drm_primary_helper_destroy,
7721         .reset = dm_drm_plane_reset,
7722         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7723         .atomic_destroy_state = dm_drm_plane_destroy_state,
7724         .format_mod_supported = dm_plane_format_mod_supported,
7725 };
7726
7727 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7728                                       struct drm_plane_state *new_state)
7729 {
7730         struct amdgpu_framebuffer *afb;
7731         struct drm_gem_object *obj;
7732         struct amdgpu_device *adev;
7733         struct amdgpu_bo *rbo;
7734         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7735         uint32_t domain;
7736         int r;
7737
7738         if (!new_state->fb) {
7739                 DRM_DEBUG_KMS("No FB bound\n");
7740                 return 0;
7741         }
7742
7743         afb = to_amdgpu_framebuffer(new_state->fb);
7744         obj = new_state->fb->obj[0];
7745         rbo = gem_to_amdgpu_bo(obj);
7746         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7747
7748         r = amdgpu_bo_reserve(rbo, true);
7749         if (r) {
7750                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7751                 return r;
7752         }
7753
7754         r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7755         if (r) {
7756                 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7757                 goto error_unlock;
7758         }
7759
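	/*
	 * The hardware cursor must be pinned in VRAM; other plane types may
	 * also scan out of GTT when the ASIC supports it.
	 */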
7760         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7761                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7762         else
7763                 domain = AMDGPU_GEM_DOMAIN_VRAM;
7764
7765         r = amdgpu_bo_pin(rbo, domain);
7766         if (unlikely(r != 0)) {
7767                 if (r != -ERESTARTSYS)
7768                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7769                 goto error_unlock;
7770         }
7771
7772         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7773         if (unlikely(r != 0)) {
7774                 DRM_ERROR("%p bind failed\n", rbo);
7775                 goto error_unpin;
7776         }
7777
7778         r = drm_gem_plane_helper_prepare_fb(plane, new_state);
7779         if (unlikely(r != 0))
7780                 goto error_unpin;
7781
7782         amdgpu_bo_unreserve(rbo);
7783
7784         afb->address = amdgpu_bo_gpu_offset(rbo);
7785
7786         amdgpu_bo_ref(rbo);
7787
	/*
7789          * We don't do surface updates on planes that have been newly created,
7790          * but we also don't have the afb->address during atomic check.
7791          *
7792          * Fill in buffer attributes depending on the address here, but only on
7793          * newly created planes since they're not being used by DC yet and this
7794          * won't modify global state.
7795          */
7796         dm_plane_state_old = to_dm_plane_state(plane->state);
7797         dm_plane_state_new = to_dm_plane_state(new_state);
7798
7799         if (dm_plane_state_new->dc_state &&
7800             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7801                 struct dc_plane_state *plane_state =
7802                         dm_plane_state_new->dc_state;
7803                 bool force_disable_dcc = !plane_state->dcc.enable;
7804
7805                 fill_plane_buffer_attributes(
7806                         adev, afb, plane_state->format, plane_state->rotation,
7807                         afb->tiling_flags,
7808                         &plane_state->tiling_info, &plane_state->plane_size,
7809                         &plane_state->dcc, &plane_state->address,
7810                         afb->tmz_surface, force_disable_dcc);
7811         }
7812
7813         return 0;
7814
7815 error_unpin:
7816         amdgpu_bo_unpin(rbo);
7817
7818 error_unlock:
7819         amdgpu_bo_unreserve(rbo);
7820         return r;
7821 }
7822
7823 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7824                                        struct drm_plane_state *old_state)
7825 {
7826         struct amdgpu_bo *rbo;
7827         int r;
7828
7829         if (!old_state->fb)
7830                 return;
7831
7832         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7833         r = amdgpu_bo_reserve(rbo, false);
7834         if (unlikely(r)) {
7835                 DRM_ERROR("failed to reserve rbo before unpin\n");
7836                 return;
7837         }
7838
7839         amdgpu_bo_unpin(rbo);
7840         amdgpu_bo_unreserve(rbo);
7841         amdgpu_bo_unref(&rbo);
7842 }
7843
7844 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7845                                        struct drm_crtc_state *new_crtc_state)
7846 {
7847         struct drm_framebuffer *fb = state->fb;
7848         int min_downscale, max_upscale;
7849         int min_scale = 0;
7850         int max_scale = INT_MAX;
7851
7852         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7853         if (fb && state->crtc) {
7854                 /* Validate viewport to cover the case when only the position changes */
7855                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7856                         int viewport_width = state->crtc_w;
7857                         int viewport_height = state->crtc_h;
7858
7859                         if (state->crtc_x < 0)
7860                                 viewport_width += state->crtc_x;
7861                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7862                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7863
7864                         if (state->crtc_y < 0)
7865                                 viewport_height += state->crtc_y;
7866                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7867                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7868
7869                         if (viewport_width < 0 || viewport_height < 0) {
7870                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7871                                 return -EINVAL;
7872                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7873                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7874                                 return -EINVAL;
7875                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7876                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7877                                 return -EINVAL;
7878                         }
7879
7880                 }
7881
7882                 /* Get min/max allowed scaling factors from plane caps. */
7883                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7884                                              &min_downscale, &max_upscale);
7885                 /*
7886                  * Convert to drm convention: 16.16 fixed point, instead of dc's
7887                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7888                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
7889                  */
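                /*
                 * Worked example: max_upscale = 16000 (16x in dc units)
                 * gives min_scale = (1000 << 16) / 16000 = 4096, i.e. 1/16
                 * in 16.16 fixed point; min_downscale = 250 (at most 4x
                 * downscale) gives max_scale = 262144, i.e. 4.0.
                 */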
7890                 min_scale = (1000 << 16) / max_upscale;
7891                 max_scale = (1000 << 16) / min_downscale;
7892         }
7893
7894         return drm_atomic_helper_check_plane_state(
7895                 state, new_crtc_state, min_scale, max_scale, true, true);
7896 }
7897
7898 static int dm_plane_atomic_check(struct drm_plane *plane,
7899                                  struct drm_atomic_state *state)
7900 {
7901         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7902                                                                                  plane);
7903         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7904         struct dc *dc = adev->dm.dc;
7905         struct dm_plane_state *dm_plane_state;
7906         struct dc_scaling_info scaling_info;
7907         struct drm_crtc_state *new_crtc_state;
7908         int ret;
7909
7910         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7911
7912         dm_plane_state = to_dm_plane_state(new_plane_state);
7913
7914         if (!dm_plane_state->dc_state)
7915                 return 0;
7916
7917         new_crtc_state =
7918                 drm_atomic_get_new_crtc_state(state,
7919                                               new_plane_state->crtc);
7920         if (!new_crtc_state)
7921                 return -EINVAL;
7922
7923         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7924         if (ret)
7925                 return ret;
7926
7927         ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7928         if (ret)
7929                 return ret;
7930
7931         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7932                 return 0;
7933
7934         return -EINVAL;
7935 }
7936
7937 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7938                                        struct drm_atomic_state *state)
7939 {
7940         /* Only support async updates on cursor planes. */
7941         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7942                 return -EINVAL;
7943
7944         return 0;
7945 }
7946
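/*
 * Async (cursor) updates bypass the normal atomic commit, so the helpers
 * never swap in the new plane state; the relevant fields are therefore
 * folded into the current state by hand below, after which the cursor is
 * programmed immediately.
 */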
7947 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7948                                          struct drm_atomic_state *state)
7949 {
7950         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7951                                                                            plane);
7952         struct drm_plane_state *old_state =
7953                 drm_atomic_get_old_plane_state(state, plane);
7954
7955         trace_amdgpu_dm_atomic_update_cursor(new_state);
7956
7957         swap(plane->state->fb, new_state->fb);
7958
7959         plane->state->src_x = new_state->src_x;
7960         plane->state->src_y = new_state->src_y;
7961         plane->state->src_w = new_state->src_w;
7962         plane->state->src_h = new_state->src_h;
7963         plane->state->crtc_x = new_state->crtc_x;
7964         plane->state->crtc_y = new_state->crtc_y;
7965         plane->state->crtc_w = new_state->crtc_w;
7966         plane->state->crtc_h = new_state->crtc_h;
7967
7968         handle_cursor_update(plane, old_state);
7969 }
7970
7971 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7972         .prepare_fb = dm_plane_helper_prepare_fb,
7973         .cleanup_fb = dm_plane_helper_cleanup_fb,
7974         .atomic_check = dm_plane_atomic_check,
7975         .atomic_async_check = dm_plane_atomic_async_check,
7976         .atomic_async_update = dm_plane_atomic_async_update
7977 };
7978
7979 /*
7980  * TODO: these are currently initialized to rgb formats only.
7981  * For future use cases we should either initialize them dynamically based on
7982  * plane capabilities, or initialize this array to all formats, so the
7983  * internal drm check will succeed, and let DC implement the proper check.
7984  */
7985 static const uint32_t rgb_formats[] = {
7986         DRM_FORMAT_XRGB8888,
7987         DRM_FORMAT_ARGB8888,
7988         DRM_FORMAT_RGBA8888,
7989         DRM_FORMAT_XRGB2101010,
7990         DRM_FORMAT_XBGR2101010,
7991         DRM_FORMAT_ARGB2101010,
7992         DRM_FORMAT_ABGR2101010,
7993         DRM_FORMAT_XRGB16161616,
7994         DRM_FORMAT_XBGR16161616,
7995         DRM_FORMAT_ARGB16161616,
7996         DRM_FORMAT_ABGR16161616,
7997         DRM_FORMAT_XBGR8888,
7998         DRM_FORMAT_ABGR8888,
7999         DRM_FORMAT_RGB565,
8000 };
8001
8002 static const uint32_t overlay_formats[] = {
8003         DRM_FORMAT_XRGB8888,
8004         DRM_FORMAT_ARGB8888,
8005         DRM_FORMAT_RGBA8888,
8006         DRM_FORMAT_XBGR8888,
8007         DRM_FORMAT_ABGR8888,
8008         DRM_FORMAT_RGB565
8009 };
8010
8011 static const u32 cursor_formats[] = {
8012         DRM_FORMAT_ARGB8888
8013 };
8014
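/*
 * Fill @formats with up to @max_formats fourcc codes suitable for @plane,
 * based on the plane type and, where available, the DC plane caps.
 * Returns the number of formats actually written.
 */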
8015 static int get_plane_formats(const struct drm_plane *plane,
8016                              const struct dc_plane_cap *plane_cap,
8017                              uint32_t *formats, int max_formats)
8018 {
8019         int i, num_formats = 0;
8020
8021         /*
8022          * TODO: Query support for each group of formats directly from
8023          * DC plane caps. This will require adding more formats to the
8024          * caps list.
8025          */
8026
8027         switch (plane->type) {
8028         case DRM_PLANE_TYPE_PRIMARY:
8029                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
8030                         if (num_formats >= max_formats)
8031                                 break;
8032
8033                         formats[num_formats++] = rgb_formats[i];
8034                 }
8035
8036                 if (plane_cap && plane_cap->pixel_format_support.nv12)
8037                         formats[num_formats++] = DRM_FORMAT_NV12;
8038                 if (plane_cap && plane_cap->pixel_format_support.p010)
8039                         formats[num_formats++] = DRM_FORMAT_P010;
8040                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
8041                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
8042                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
8043                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
8044                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
8045                 }
8046                 break;
8047
8048         case DRM_PLANE_TYPE_OVERLAY:
8049                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
8050                         if (num_formats >= max_formats)
8051                                 break;
8052
8053                         formats[num_formats++] = overlay_formats[i];
8054                 }
8055                 break;
8056
8057         case DRM_PLANE_TYPE_CURSOR:
8058                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
8059                         if (num_formats >= max_formats)
8060                                 break;
8061
8062                         formats[num_formats++] = cursor_formats[i];
8063                 }
8064                 break;
8065         }
8066
8067         return num_formats;
8068 }
8069
8070 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
8071                                 struct drm_plane *plane,
8072                                 unsigned long possible_crtcs,
8073                                 const struct dc_plane_cap *plane_cap)
8074 {
8075         uint32_t formats[32];
8076         int num_formats;
8077         int res = -EPERM;
8078         unsigned int supported_rotations;
8079         uint64_t *modifiers = NULL;
8080
8081         num_formats = get_plane_formats(plane, plane_cap, formats,
8082                                         ARRAY_SIZE(formats));
8083
8084         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
8085         if (res)
8086                 return res;
8087
8088         if (modifiers == NULL)
8089                 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
8090
8091         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
8092                                        &dm_plane_funcs, formats, num_formats,
8093                                        modifiers, plane->type, NULL);
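        /*
         * drm_universal_plane_init() duplicates the modifier list, so the
         * local copy can be freed regardless of the result.
         */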
8094         kfree(modifiers);
8095         if (res)
8096                 return res;
8097
8098         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
8099             plane_cap && plane_cap->per_pixel_alpha) {
8100                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
8101                                           BIT(DRM_MODE_BLEND_PREMULTI) |
8102                                           BIT(DRM_MODE_BLEND_COVERAGE);
8103
8104                 drm_plane_create_alpha_property(plane);
8105                 drm_plane_create_blend_mode_property(plane, blend_caps);
8106         }
8107
8108         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
8109             plane_cap &&
8110             (plane_cap->pixel_format_support.nv12 ||
8111              plane_cap->pixel_format_support.p010)) {
8112                 /* This only affects YUV formats. */
8113                 drm_plane_create_color_properties(
8114                         plane,
8115                         BIT(DRM_COLOR_YCBCR_BT601) |
8116                         BIT(DRM_COLOR_YCBCR_BT709) |
8117                         BIT(DRM_COLOR_YCBCR_BT2020),
8118                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
8119                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
8120                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
8121         }
8122
8123         supported_rotations =
8124                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
8125                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
8126
8127         if (dm->adev->asic_type >= CHIP_BONAIRE &&
8128             plane->type != DRM_PLANE_TYPE_CURSOR)
8129                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
8130                                                    supported_rotations);
8131
8132         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
8133
8134         /* Create (reset) the plane state */
8135         if (plane->funcs->reset)
8136                 plane->funcs->reset(plane);
8137
8138         return 0;
8139 }
8140
8141 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
8142                                struct drm_plane *plane,
8143                                uint32_t crtc_index)
8144 {
8145         struct amdgpu_crtc *acrtc = NULL;
8146         struct drm_plane *cursor_plane;
8147
8148         int res = -ENOMEM;
8149
8150         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
8151         if (!cursor_plane)
8152                 goto fail;
8153
8154         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
8155         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
             if (res)
                     goto fail;
8156
8157         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8158         if (!acrtc)
8159                 goto fail;
8160
8161         res = drm_crtc_init_with_planes(
8162                         dm->ddev,
8163                         &acrtc->base,
8164                         plane,
8165                         cursor_plane,
8166                         &amdgpu_dm_crtc_funcs, NULL);
8167
8168         if (res)
8169                 goto fail;
8170
8171         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8172
8173         /* Create (reset) the CRTC state */
8174         if (acrtc->base.funcs->reset)
8175                 acrtc->base.funcs->reset(&acrtc->base);
8176
8177         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8178         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8179
8180         acrtc->crtc_id = crtc_index;
8181         acrtc->base.enabled = false;
8182         acrtc->otg_inst = -1;
8183
8184         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8185         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8186                                    true, MAX_COLOR_LUT_ENTRIES);
8187         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8188
8189         return 0;
8190
8191 fail:
8192         kfree(acrtc);
8193         kfree(cursor_plane);
8194         return res;
8195 }
8196
8197
8198 static int to_drm_connector_type(enum signal_type st)
8199 {
8200         switch (st) {
8201         case SIGNAL_TYPE_HDMI_TYPE_A:
8202                 return DRM_MODE_CONNECTOR_HDMIA;
8203         case SIGNAL_TYPE_EDP:
8204                 return DRM_MODE_CONNECTOR_eDP;
8205         case SIGNAL_TYPE_LVDS:
8206                 return DRM_MODE_CONNECTOR_LVDS;
8207         case SIGNAL_TYPE_RGB:
8208                 return DRM_MODE_CONNECTOR_VGA;
8209         case SIGNAL_TYPE_DISPLAY_PORT:
8210         case SIGNAL_TYPE_DISPLAY_PORT_MST:
8211                 return DRM_MODE_CONNECTOR_DisplayPort;
8212         case SIGNAL_TYPE_DVI_DUAL_LINK:
8213         case SIGNAL_TYPE_DVI_SINGLE_LINK:
8214                 return DRM_MODE_CONNECTOR_DVID;
8215         case SIGNAL_TYPE_VIRTUAL:
8216                 return DRM_MODE_CONNECTOR_VIRTUAL;
8217
8218         default:
8219                 return DRM_MODE_CONNECTOR_Unknown;
8220         }
8221 }
8222
8223 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8224 {
8225         struct drm_encoder *encoder;
8226
8227         /* There is only one encoder per connector */
8228         drm_connector_for_each_possible_encoder(connector, encoder)
8229                 return encoder;
8230
8231         return NULL;
8232 }
8233
8234 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8235 {
8236         struct drm_encoder *encoder;
8237         struct amdgpu_encoder *amdgpu_encoder;
8238
8239         encoder = amdgpu_dm_connector_to_encoder(connector);
8240
8241         if (encoder == NULL)
8242                 return;
8243
8244         amdgpu_encoder = to_amdgpu_encoder(encoder);
8245
8246         amdgpu_encoder->native_mode.clock = 0;
8247
8248         if (!list_empty(&connector->probed_modes)) {
8249                 struct drm_display_mode *preferred_mode = NULL;
8250
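                /*
                 * The caller sorts probed_modes with drm_mode_sort(), which
                 * places preferred modes first, so only the head of the list
                 * needs to be examined; hence the unconditional break below.
                 */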
8251                 list_for_each_entry(preferred_mode,
8252                                     &connector->probed_modes,
8253                                     head) {
8254                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8255                                 amdgpu_encoder->native_mode = *preferred_mode;
8256
8257                         break;
8258                 }
8259
8260         }
8261 }
8262
8263 static struct drm_display_mode *
8264 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8265                              char *name,
8266                              int hdisplay, int vdisplay)
8267 {
8268         struct drm_device *dev = encoder->dev;
8269         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8270         struct drm_display_mode *mode = NULL;
8271         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8272
8273         mode = drm_mode_duplicate(dev, native_mode);
8274
8275         if (mode == NULL)
8276                 return NULL;
8277
8278         mode->hdisplay = hdisplay;
8279         mode->vdisplay = vdisplay;
8280         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8281         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8282
8283         return mode;
8285 }
8286
8287 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8288                                                  struct drm_connector *connector)
8289 {
8290         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8291         struct drm_display_mode *mode = NULL;
8292         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8293         struct amdgpu_dm_connector *amdgpu_dm_connector =
8294                                 to_amdgpu_dm_connector(connector);
8295         int i;
8296         int n;
8297         struct mode_size {
8298                 char name[DRM_DISPLAY_MODE_LEN];
8299                 int w;
8300                 int h;
8301         } common_modes[] = {
8302                 {  "640x480",  640,  480},
8303                 {  "800x600",  800,  600},
8304                 { "1024x768", 1024,  768},
8305                 { "1280x720", 1280,  720},
8306                 { "1280x800", 1280,  800},
8307                 {"1280x1024", 1280, 1024},
8308                 { "1440x900", 1440,  900},
8309                 {"1680x1050", 1680, 1050},
8310                 {"1600x1200", 1600, 1200},
8311                 {"1920x1080", 1920, 1080},
8312                 {"1920x1200", 1920, 1200}
8313         };
8314
8315         n = ARRAY_SIZE(common_modes);
8316
8317         for (i = 0; i < n; i++) {
8318                 struct drm_display_mode *curmode = NULL;
8319                 bool mode_existed = false;
8320
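                /*
                 * Skip modes that exceed the native resolution, as well as
                 * the native mode itself, which is already exposed via the
                 * probed list.
                 */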
8321                 if (common_modes[i].w > native_mode->hdisplay ||
8322                     common_modes[i].h > native_mode->vdisplay ||
8323                    (common_modes[i].w == native_mode->hdisplay &&
8324                     common_modes[i].h == native_mode->vdisplay))
8325                         continue;
8326
8327                 list_for_each_entry(curmode, &connector->probed_modes, head) {
8328                         if (common_modes[i].w == curmode->hdisplay &&
8329                             common_modes[i].h == curmode->vdisplay) {
8330                                 mode_existed = true;
8331                                 break;
8332                         }
8333                 }
8334
8335                 if (mode_existed)
8336                         continue;
8337
8338                 mode = amdgpu_dm_create_common_mode(encoder,
8339                                 common_modes[i].name, common_modes[i].w,
8340                                 common_modes[i].h);
8341                 if (!mode)
8342                         continue;
8343
8344                 drm_mode_probed_add(connector, mode);
8345                 amdgpu_dm_connector->num_modes++;
8346         }
8347 }
8348
8349 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8350 {
8351         struct drm_encoder *encoder;
8352         struct amdgpu_encoder *amdgpu_encoder;
8353         const struct drm_display_mode *native_mode;
8354
8355         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8356             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8357                 return;
8358
8359         encoder = amdgpu_dm_connector_to_encoder(connector);
8360         if (!encoder)
8361                 return;
8362
8363         amdgpu_encoder = to_amdgpu_encoder(encoder);
8364
8365         native_mode = &amdgpu_encoder->native_mode;
8366         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8367                 return;
8368
8369         drm_connector_set_panel_orientation_with_quirk(connector,
8370                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8371                                                        native_mode->hdisplay,
8372                                                        native_mode->vdisplay);
8373 }
8374
8375 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8376                                               struct edid *edid)
8377 {
8378         struct amdgpu_dm_connector *amdgpu_dm_connector =
8379                         to_amdgpu_dm_connector(connector);
8380
8381         if (edid) {
8382                 /* empty probed_modes */
8383                 INIT_LIST_HEAD(&connector->probed_modes);
8384                 amdgpu_dm_connector->num_modes =
8385                                 drm_add_edid_modes(connector, edid);
8386
8387                 /* Sort the probed modes before calling
8388                  * amdgpu_dm_get_native_mode(), since an EDID can
8389                  * contain more than one preferred mode. Modes later in
8390                  * the probed mode list may have a higher preferred
8391                  * resolution: for example, 3840x2160 in the base EDID
8392                  * preferred timing and 4096x2160 as the preferred
8393                  * resolution in a later DisplayID extension block.
8394                  */
8395                 drm_mode_sort(&connector->probed_modes);
8396                 amdgpu_dm_get_native_mode(connector);
8397
8398                 /* Freesync capabilities are reset by calling
8399                  * drm_add_edid_modes() and need to be
8400                  * restored here.
8401                  */
8402                 amdgpu_dm_update_freesync_caps(connector, edid);
8403
8404                 amdgpu_set_panel_orientation(connector);
8405         } else {
8406                 amdgpu_dm_connector->num_modes = 0;
8407         }
8408 }
8409
8410 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8411                               struct drm_display_mode *mode)
8412 {
8413         struct drm_display_mode *m;
8414
8415         list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8416                 if (drm_mode_equal(m, mode))
8417                         return true;
8418         }
8419
8420         return false;
8421 }
8422
8423 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8424 {
8425         const struct drm_display_mode *m;
8426         struct drm_display_mode *new_mode;
8427         uint i;
8428         uint32_t new_modes_count = 0;
8429
8430         /* Standard FPS values
8431          *
8432          * 23.976       - TV/NTSC
8433          * 24           - Cinema
8434          * 25           - TV/PAL
8435          * 29.97        - TV/NTSC
8436          * 30           - TV/NTSC
8437          * 48           - Cinema HFR
8438          * 50           - TV/PAL
8439          * 60           - Commonly used
8440          * 48,72,96,120 - Multiples of 24
8441          */
8442         static const uint32_t common_rates[] = {
8443                 23976, 24000, 25000, 29970, 30000,
8444                 48000, 50000, 60000, 72000, 96000, 120000
8445         };
8446
8447         /*
8448          * Find mode with highest refresh rate with the same resolution
8449          * as the preferred mode. Some monitors report a preferred mode
8450          * with lower resolution than the highest refresh rate supported.
8451          */
8452
8453         m = get_highest_refresh_rate_mode(aconnector, true);
8454         if (!m)
8455                 return 0;
8456
8457         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8458                 uint64_t target_vtotal, target_vtotal_diff;
8459                 uint64_t num, den;
8460
8461                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8462                         continue;
8463
8464                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8465                     common_rates[i] > aconnector->max_vfreq * 1000)
8466                         continue;
8467
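                /*
                 * A mode's refresh rate is clock * 1000 / (htotal * vtotal)
                 * Hz (clock is in kHz), and common_rates[] is in mHz, so
                 * the vtotal that hits the target rate at this pixel clock
                 * is clock * 1000 * 1000 / (rate * htotal), computed below.
                 */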
8468                 num = (unsigned long long)m->clock * 1000 * 1000;
8469                 den = common_rates[i] * (unsigned long long)m->htotal;
8470                 target_vtotal = div_u64(num, den);
8471                 target_vtotal_diff = target_vtotal - m->vtotal;
8472
8473                 /* Check for illegal modes */
8474                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8475                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
8476                     m->vtotal + target_vtotal_diff < m->vsync_end)
8477                         continue;
8478
8479                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8480                 if (!new_mode)
8481                         goto out;
8482
8483                 new_mode->vtotal += (u16)target_vtotal_diff;
8484                 new_mode->vsync_start += (u16)target_vtotal_diff;
8485                 new_mode->vsync_end += (u16)target_vtotal_diff;
8486                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8487                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8488
8489                 if (!is_duplicate_mode(aconnector, new_mode)) {
8490                         drm_mode_probed_add(&aconnector->base, new_mode);
8491                         new_modes_count += 1;
8492                 } else
8493                         drm_mode_destroy(aconnector->base.dev, new_mode);
8494         }
8495  out:
8496         return new_modes_count;
8497 }
8498
8499 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8500                                                    struct edid *edid)
8501 {
8502         struct amdgpu_dm_connector *amdgpu_dm_connector =
8503                 to_amdgpu_dm_connector(connector);
8504
8505         if (!edid)
8506                 return;
8507
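        /*
         * Only synthesize fixed-rate modes when the reported VRR range is
         * usefully wide (more than 10 Hz between min and max).
         */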
8508         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8509                 amdgpu_dm_connector->num_modes +=
8510                         add_fs_modes(amdgpu_dm_connector);
8511 }
8512
8513 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8514 {
8515         struct amdgpu_dm_connector *amdgpu_dm_connector =
8516                         to_amdgpu_dm_connector(connector);
8517         struct drm_encoder *encoder;
8518         struct edid *edid = amdgpu_dm_connector->edid;
8519
8520         encoder = amdgpu_dm_connector_to_encoder(connector);
8521
8522         if (!drm_edid_is_valid(edid)) {
8523                 amdgpu_dm_connector->num_modes =
8524                                 drm_add_modes_noedid(connector, 640, 480);
8525         } else {
8526                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8527                 amdgpu_dm_connector_add_common_modes(encoder, connector);
8528                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8529         }
8530         amdgpu_dm_fbc_init(connector);
8531
8532         return amdgpu_dm_connector->num_modes;
8533 }
8534
8535 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8536                                      struct amdgpu_dm_connector *aconnector,
8537                                      int connector_type,
8538                                      struct dc_link *link,
8539                                      int link_index)
8540 {
8541         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8542
8543         /*
8544          * Some of the properties below require access to state, like bpc.
8545          * Allocate some default initial connector state with our reset helper.
8546          */
8547         if (aconnector->base.funcs->reset)
8548                 aconnector->base.funcs->reset(&aconnector->base);
8549
8550         aconnector->connector_id = link_index;
8551         aconnector->dc_link = link;
8552         aconnector->base.interlace_allowed = false;
8553         aconnector->base.doublescan_allowed = false;
8554         aconnector->base.stereo_allowed = false;
8555         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8556         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8557         aconnector->audio_inst = -1;
8558         mutex_init(&aconnector->hpd_lock);
8559
8560         /*
8561          * Configure HPD hot-plug support. connector->polled defaults to 0,
8562          * which means HPD hot plug is not supported.
8563          */
8564         switch (connector_type) {
8565         case DRM_MODE_CONNECTOR_HDMIA:
8566                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8567                 aconnector->base.ycbcr_420_allowed =
8568                         link->link_enc->features.hdmi_ycbcr420_supported;
8569                 break;
8570         case DRM_MODE_CONNECTOR_DisplayPort:
8571                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8572                 link->link_enc = link_enc_cfg_get_link_enc(link);
8573                 ASSERT(link->link_enc);
8574                 if (link->link_enc)
8575                         aconnector->base.ycbcr_420_allowed =
8576                                 link->link_enc->features.dp_ycbcr420_supported;
8577                 break;
8578         case DRM_MODE_CONNECTOR_DVID:
8579                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8580                 break;
8581         default:
8582                 break;
8583         }
8584
8585         drm_object_attach_property(&aconnector->base.base,
8586                                 dm->ddev->mode_config.scaling_mode_property,
8587                                 DRM_MODE_SCALE_NONE);
8588
8589         drm_object_attach_property(&aconnector->base.base,
8590                                 adev->mode_info.underscan_property,
8591                                 UNDERSCAN_OFF);
8592         drm_object_attach_property(&aconnector->base.base,
8593                                 adev->mode_info.underscan_hborder_property,
8594                                 0);
8595         drm_object_attach_property(&aconnector->base.base,
8596                                 adev->mode_info.underscan_vborder_property,
8597                                 0);
8598
8599         if (!aconnector->mst_port)
8600                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8601
8602         /* This defaults to the max in the range, but we want 8bpc for non-eDP. */
8603         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8604         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8605
8606         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8607             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8608                 drm_object_attach_property(&aconnector->base.base,
8609                                 adev->mode_info.abm_level_property, 0);
8610         }
8611
8612         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8613             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8614             connector_type == DRM_MODE_CONNECTOR_eDP) {
8615                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8616
8617                 if (!aconnector->mst_port)
8618                         drm_connector_attach_vrr_capable_property(&aconnector->base);
8619
8620 #ifdef CONFIG_DRM_AMD_DC_HDCP
8621                 if (adev->dm.hdcp_workqueue)
8622                         drm_connector_attach_content_protection_property(&aconnector->base, true);
8623 #endif
8624         }
8625 }
8626
8627 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8628                               struct i2c_msg *msgs, int num)
8629 {
8630         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8631         struct ddc_service *ddc_service = i2c->ddc_service;
8632         struct i2c_command cmd;
8633         int i;
8634         int result = -EIO;
8635
8636         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8637
8638         if (!cmd.payloads)
8639                 return result;
8640
8641         cmd.number_of_payloads = num;
8642         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8643         cmd.speed = 100;
8644
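        /*
         * Translate each struct i2c_msg into a DC i2c_payload; the transfer
         * direction comes from the I2C_M_RD flag.
         */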
8645         for (i = 0; i < num; i++) {
8646                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8647                 cmd.payloads[i].address = msgs[i].addr;
8648                 cmd.payloads[i].length = msgs[i].len;
8649                 cmd.payloads[i].data = msgs[i].buf;
8650         }
8651
8652         if (dc_submit_i2c(
8653                         ddc_service->ctx->dc,
8654                         ddc_service->ddc_pin->hw_info.ddc_channel,
8655                         &cmd))
8656                 result = num;
8657
8658         kfree(cmd.payloads);
8659         return result;
8660 }
8661
8662 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8663 {
8664         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8665 }
8666
8667 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8668         .master_xfer = amdgpu_dm_i2c_xfer,
8669         .functionality = amdgpu_dm_i2c_func,
8670 };
8671
8672 static struct amdgpu_i2c_adapter *
8673 create_i2c(struct ddc_service *ddc_service,
8674            int link_index,
8675            int *res)
8676 {
8677         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8678         struct amdgpu_i2c_adapter *i2c;
8679
8680         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8681         if (!i2c)
8682                 return NULL;
8683         i2c->base.owner = THIS_MODULE;
8684         i2c->base.class = I2C_CLASS_DDC;
8685         i2c->base.dev.parent = &adev->pdev->dev;
8686         i2c->base.algo = &amdgpu_dm_i2c_algo;
8687         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8688         i2c_set_adapdata(&i2c->base, i2c);
8689         i2c->ddc_service = ddc_service;
8690         if (i2c->ddc_service->ddc_pin)
8691                 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8692
8693         return i2c;
8694 }
8695
8696
8697 /*
8698  * Note: this function assumes that dc_link_detect() was called for the
8699  * dc_link which will be represented by this aconnector.
8700  */
8701 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8702                                     struct amdgpu_dm_connector *aconnector,
8703                                     uint32_t link_index,
8704                                     struct amdgpu_encoder *aencoder)
8705 {
8706         int res = 0;
8707         int connector_type;
8708         struct dc *dc = dm->dc;
8709         struct dc_link *link = dc_get_link_at_index(dc, link_index);
8710         struct amdgpu_i2c_adapter *i2c;
8711
8712         link->priv = aconnector;
8713
8714         DRM_DEBUG_DRIVER("%s()\n", __func__);
8715
8716         i2c = create_i2c(link->ddc, link->link_index, &res);
8717         if (!i2c) {
8718                 DRM_ERROR("Failed to create i2c adapter data\n");
8719                 return -ENOMEM;
8720         }
8721
8722         aconnector->i2c = i2c;
8723         res = i2c_add_adapter(&i2c->base);
8724
8725         if (res) {
8726                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8727                 goto out_free;
8728         }
8729
8730         connector_type = to_drm_connector_type(link->connector_signal);
8731
8732         res = drm_connector_init_with_ddc(
8733                         dm->ddev,
8734                         &aconnector->base,
8735                         &amdgpu_dm_connector_funcs,
8736                         connector_type,
8737                         &i2c->base);
8738
8739         if (res) {
8740                 DRM_ERROR("connector_init failed\n");
8741                 aconnector->connector_id = -1;
8742                 goto out_free;
8743         }
8744
8745         drm_connector_helper_add(
8746                         &aconnector->base,
8747                         &amdgpu_dm_connector_helper_funcs);
8748
8749         amdgpu_dm_connector_init_helper(
8750                 dm,
8751                 aconnector,
8752                 connector_type,
8753                 link,
8754                 link_index);
8755
8756         drm_connector_attach_encoder(
8757                 &aconnector->base, &aencoder->base);
8758
8759         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8760                 || connector_type == DRM_MODE_CONNECTOR_eDP)
8761                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8762
8763 out_free:
8764         if (res) {
8765                 kfree(i2c);
8766                 aconnector->i2c = NULL;
8767         }
8768         return res;
8769 }
8770
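/*
 * Build an encoder possible_crtcs mask with one bit per CRTC, capped at
 * six CRTCs (0x3f).
 */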
8771 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8772 {
8773         switch (adev->mode_info.num_crtc) {
8774         case 1:
8775                 return 0x1;
8776         case 2:
8777                 return 0x3;
8778         case 3:
8779                 return 0x7;
8780         case 4:
8781                 return 0xf;
8782         case 5:
8783                 return 0x1f;
8784         case 6:
8785         default:
8786                 return 0x3f;
8787         }
8788 }
8789
8790 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8791                                   struct amdgpu_encoder *aencoder,
8792                                   uint32_t link_index)
8793 {
8794         struct amdgpu_device *adev = drm_to_adev(dev);
8795
8796         int res = drm_encoder_init(dev,
8797                                    &aencoder->base,
8798                                    &amdgpu_dm_encoder_funcs,
8799                                    DRM_MODE_ENCODER_TMDS,
8800                                    NULL);
8801
8802         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8803
8804         if (!res)
8805                 aencoder->encoder_id = link_index;
8806         else
8807                 aencoder->encoder_id = -1;
8808
8809         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8810
8811         return res;
8812 }
8813
8814 static void manage_dm_interrupts(struct amdgpu_device *adev,
8815                                  struct amdgpu_crtc *acrtc,
8816                                  bool enable)
8817 {
8818         /*
8819          * We have no guarantee that the frontend index maps to the same
8820          * backend index - some even map to more than one.
8821          *
8822          * TODO: Use a different interrupt or check DC itself for the mapping.
8823          */
8824         int irq_type =
8825                 amdgpu_display_crtc_idx_to_irq_type(
8826                         adev,
8827                         acrtc->crtc_id);
8828
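        /*
         * Note the ordering: on enable, vblank handling is switched on
         * before the pageflip (and vline0) interrupts are unmasked;
         * disable tears things down in the reverse order.
         */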
8829         if (enable) {
8830                 drm_crtc_vblank_on(&acrtc->base);
8831                 amdgpu_irq_get(
8832                         adev,
8833                         &adev->pageflip_irq,
8834                         irq_type);
8835 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8836                 amdgpu_irq_get(
8837                         adev,
8838                         &adev->vline0_irq,
8839                         irq_type);
8840 #endif
8841         } else {
8842 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8843                 amdgpu_irq_put(
8844                         adev,
8845                         &adev->vline0_irq,
8846                         irq_type);
8847 #endif
8848                 amdgpu_irq_put(
8849                         adev,
8850                         &adev->pageflip_irq,
8851                         irq_type);
8852                 drm_crtc_vblank_off(&acrtc->base);
8853         }
8854 }
8855
8856 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8857                                       struct amdgpu_crtc *acrtc)
8858 {
8859         int irq_type =
8860                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8861
8862         /*
8863          * This reads the current state for the IRQ and forcibly reapplies
8864          * the setting to hardware.
8865          */
8866         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8867 }
8868
8869 static bool
8870 is_scaling_state_different(const struct dm_connector_state *dm_state,
8871                            const struct dm_connector_state *old_dm_state)
8872 {
8873         if (dm_state->scaling != old_dm_state->scaling)
8874                 return true;
8875         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8876                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8877                         return true;
8878         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8879                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8880                         return true;
8881         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8882                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8883                 return true;
8884         return false;
8885 }
8886
8887 #ifdef CONFIG_DRM_AMD_DC_HDCP
8888 static bool is_content_protection_different(struct drm_connector_state *state,
8889                                             const struct drm_connector_state *old_state,
8890                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8891 {
8892         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8893         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8894
8895         /* Handle: Type0/1 change */
8896         if (old_state->hdcp_content_type != state->hdcp_content_type &&
8897             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8898                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8899                 return true;
8900         }
8901
8902         /* CP is being re-enabled, ignore this
8903          *
8904          * Handles:     ENABLED -> DESIRED
8905          */
8906         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8907             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8908                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8909                 return false;
8910         }
8911
8912         /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8913          *
8914          * Handles:     UNDESIRED -> ENABLED
8915          */
8916         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8917             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8918                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8919
8920         /* Stream removed and re-enabled
8921          *
8922          * Can sometimes overlap with the HPD case,
8923          * thus set update_hdcp to false to avoid
8924          * setting HDCP multiple times.
8925          *
8926          * Handles:     DESIRED -> DESIRED (Special case)
8927          */
8928         if (!(old_state->crtc && old_state->crtc->enabled) &&
8929                 state->crtc && state->crtc->enabled &&
8930                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8931                 dm_con_state->update_hdcp = false;
8932                 return true;
8933         }
8934
8935         /* Hot-plug, headless s3, dpms
8936          *
8937          * Only start HDCP if the display is connected/enabled.
8938          * update_hdcp flag will be set to false until the next
8939          * HPD comes in.
8940          *
8941          * Handles:     DESIRED -> DESIRED (Special case)
8942          */
8943         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8944             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8945                 dm_con_state->update_hdcp = false;
8946                 return true;
8947         }
8948
8949         /*
8950          * Handles:     UNDESIRED -> UNDESIRED
8951          *              DESIRED -> DESIRED
8952          *              ENABLED -> ENABLED
8953          */
8954         if (old_state->content_protection == state->content_protection)
8955                 return false;
8956
8957         /*
8958          * Handles:     UNDESIRED -> DESIRED
8959          *              DESIRED -> UNDESIRED
8960          *              ENABLED -> UNDESIRED
8961          */
8962         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8963                 return true;
8964
8965         /*
8966          * Handles:     DESIRED -> ENABLED
8967          */
8968         return false;
8969 }
8970
8971 #endif
8972 static void remove_stream(struct amdgpu_device *adev,
8973                           struct amdgpu_crtc *acrtc,
8974                           struct dc_stream_state *stream)
8975 {
8976         /* Update-mode case: the stream goes away, so detach the CRTC from its OTG and mark it disabled. */
8977
8978         acrtc->otg_inst = -1;
8979         acrtc->enabled = false;
8980 }
8981
8982 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8983                                struct dc_cursor_position *position)
8984 {
8985         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8986         int x, y;
8987         int xorigin = 0, yorigin = 0;
8988
8989         if (!crtc || !plane->state->fb)
8990                 return 0;
8991
8992         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8993             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8994                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8995                           __func__,
8996                           plane->state->crtc_w,
8997                           plane->state->crtc_h);
8998                 return -EINVAL;
8999         }
9000
9001         x = plane->state->crtc_x;
9002         y = plane->state->crtc_y;
9003
9004         if (x <= -amdgpu_crtc->max_cursor_width ||
9005             y <= -amdgpu_crtc->max_cursor_height)
9006                 return 0;
9007
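        /*
         * For a cursor partially off the top or left edge, clamp the
         * position to 0 and move the hotspot by the clipped amount instead
         * (bounded by the maximum cursor size).
         */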
9008         if (x < 0) {
9009                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
9010                 x = 0;
9011         }
9012         if (y < 0) {
9013                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
9014                 y = 0;
9015         }
9016         position->enable = true;
9017         position->translate_by_source = true;
9018         position->x = x;
9019         position->y = y;
9020         position->x_hotspot = xorigin;
9021         position->y_hotspot = yorigin;
9022
9023         return 0;
9024 }
9025
9026 static void handle_cursor_update(struct drm_plane *plane,
9027                                  struct drm_plane_state *old_plane_state)
9028 {
9029         struct amdgpu_device *adev = drm_to_adev(plane->dev);
9030         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
9031         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
9032         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
9033         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
9034         uint64_t address = afb ? afb->address : 0;
9035         struct dc_cursor_position position = {0};
9036         struct dc_cursor_attributes attributes;
9037         int ret;
9038
9039         if (!plane->state->fb && !old_plane_state->fb)
9040                 return;
9041
9042         DC_LOG_CURSOR("%s: crtc_id=%d with size %dx%d\n",
9043                       __func__,
9044                       amdgpu_crtc->crtc_id,
9045                       plane->state->crtc_w,
9046                       plane->state->crtc_h);
9047
9048         ret = get_cursor_position(plane, crtc, &position);
9049         if (ret)
9050                 return;
9051
9052         if (!position.enable) {
9053                 /* turn off cursor */
9054                 if (crtc_state && crtc_state->stream) {
9055                         mutex_lock(&adev->dm.dc_lock);
9056                         dc_stream_set_cursor_position(crtc_state->stream,
9057                                                       &position);
9058                         mutex_unlock(&adev->dm.dc_lock);
9059                 }
9060                 return;
9061         }
9062
9063         amdgpu_crtc->cursor_width = plane->state->crtc_w;
9064         amdgpu_crtc->cursor_height = plane->state->crtc_h;
9065
9066         memset(&attributes, 0, sizeof(attributes));
9067         attributes.address.high_part = upper_32_bits(address);
9068         attributes.address.low_part  = lower_32_bits(address);
9069         attributes.width             = plane->state->crtc_w;
9070         attributes.height            = plane->state->crtc_h;
9071         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
9072         attributes.rotation_angle    = 0;
9073         attributes.attribute_flags.value = 0;
9074
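        /*
         * pitches[] is in bytes, while DC expects the cursor pitch in
         * pixels, hence the division by bytes-per-pixel.
         */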
9075         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
9076
9077         if (crtc_state->stream) {
9078                 mutex_lock(&adev->dm.dc_lock);
9079                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
9080                                                          &attributes))
9081                         DRM_ERROR("DC failed to set cursor attributes\n");
9082
9083                 if (!dc_stream_set_cursor_position(crtc_state->stream,
9084                                                    &position))
9085                         DRM_ERROR("DC failed to set cursor position\n");
9086                 mutex_unlock(&adev->dm.dc_lock);
9087         }
9088 }
9089
9090 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
9091 {
9093         assert_spin_locked(&acrtc->base.dev->event_lock);
9094         WARN_ON(acrtc->event);
9095
9096         acrtc->event = acrtc->base.state->event;
9097
9098         /* Set the flip status */
9099         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
9100
9101         /* Mark this event as consumed */
9102         acrtc->base.state->event = NULL;
9103
9104         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
9105                      acrtc->crtc_id);
9106 }
9107
9108 static void update_freesync_state_on_stream(
9109         struct amdgpu_display_manager *dm,
9110         struct dm_crtc_state *new_crtc_state,
9111         struct dc_stream_state *new_stream,
9112         struct dc_plane_state *surface,
9113         u32 flip_timestamp_in_us)
9114 {
9115         struct mod_vrr_params vrr_params;
9116         struct dc_info_packet vrr_infopacket = {0};
9117         struct amdgpu_device *adev = dm->adev;
9118         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9119         unsigned long flags;
9120         bool pack_sdp_v1_3 = false;
9121
9122         if (!new_stream)
9123                 return;
9124
9125         /*
9126          * TODO: Determine why min/max totals and vrefresh can be 0 here.
9127          * For now it's sufficient to just guard against these conditions.
9128          */
9129
9130         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9131                 return;
9132
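        /*
         * dm_irq_params is shared with the vblank/vupdate interrupt
         * handlers, so it is only read and updated under the DRM
         * event_lock.
         */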
9133         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9134         vrr_params = acrtc->dm_irq_params.vrr_params;
9135
9136         if (surface) {
9137                 mod_freesync_handle_preflip(
9138                         dm->freesync_module,
9139                         surface,
9140                         new_stream,
9141                         flip_timestamp_in_us,
9142                         &vrr_params);
9143
9144                 if (adev->family < AMDGPU_FAMILY_AI &&
9145                     amdgpu_dm_vrr_active(new_crtc_state)) {
9146                         mod_freesync_handle_v_update(dm->freesync_module,
9147                                                      new_stream, &vrr_params);
9148
9149                         /* Need to call this before the frame ends. */
9150                         dc_stream_adjust_vmin_vmax(dm->dc,
9151                                                    new_crtc_state->stream,
9152                                                    &vrr_params.adjust);
9153                 }
9154         }
9155
9156         mod_freesync_build_vrr_infopacket(
9157                 dm->freesync_module,
9158                 new_stream,
9159                 &vrr_params,
9160                 PACKET_TYPE_VRR,
9161                 TRANSFER_FUNC_UNKNOWN,
9162                 &vrr_infopacket,
9163                 pack_sdp_v1_3);
9164
9165         new_crtc_state->freesync_timing_changed |=
9166                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9167                         &vrr_params.adjust,
9168                         sizeof(vrr_params.adjust)) != 0);
9169
9170         new_crtc_state->freesync_vrr_info_changed |=
9171                 (memcmp(&new_crtc_state->vrr_infopacket,
9172                         &vrr_infopacket,
9173                         sizeof(vrr_infopacket)) != 0);
9174
9175         acrtc->dm_irq_params.vrr_params = vrr_params;
9176         new_crtc_state->vrr_infopacket = vrr_infopacket;
9177
9178         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9179         new_stream->vrr_infopacket = vrr_infopacket;
9180
9181         if (new_crtc_state->freesync_vrr_info_changed)
9182                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
9183                               new_crtc_state->base.crtc->base.id,
9184                               (int)new_crtc_state->base.vrr_enabled,
9185                               (int)vrr_params.state);
9186
9187         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9188 }
9189
9190 static void update_stream_irq_parameters(
9191         struct amdgpu_display_manager *dm,
9192         struct dm_crtc_state *new_crtc_state)
9193 {
9194         struct dc_stream_state *new_stream = new_crtc_state->stream;
9195         struct mod_vrr_params vrr_params;
9196         struct mod_freesync_config config = new_crtc_state->freesync_config;
9197         struct amdgpu_device *adev = dm->adev;
9198         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9199         unsigned long flags;
9200
9201         if (!new_stream)
9202                 return;
9203
9204         /*
9205          * TODO: Determine why min/max totals and vrefresh can be 0 here.
9206          * For now it's sufficient to just guard against these conditions.
9207          */
9208         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9209                 return;
9210
9211         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9212         vrr_params = acrtc->dm_irq_params.vrr_params;
9213
9214         if (new_crtc_state->vrr_supported &&
9215             config.min_refresh_in_uhz &&
9216             config.max_refresh_in_uhz) {
9217                 /*
9218                  * if freesync compatible mode was set, config.state will be set
9219                  * in atomic check
9220                  */
9221                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9222                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9223                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9224                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9225                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9226                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9227                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9228                 } else {
9229                         config.state = new_crtc_state->base.vrr_enabled ?
9230                                                      VRR_STATE_ACTIVE_VARIABLE :
9231                                                      VRR_STATE_INACTIVE;
9232                 }
9233         } else {
9234                 config.state = VRR_STATE_UNSUPPORTED;
9235         }
9236
9237         mod_freesync_build_vrr_params(dm->freesync_module,
9238                                       new_stream,
9239                                       &config, &vrr_params);
9240
9241         new_crtc_state->freesync_timing_changed |=
9242                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9243                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9244
9245         new_crtc_state->freesync_config = config;
9246         /* Copy state for access from DM IRQ handler */
9247         acrtc->dm_irq_params.freesync_config = config;
9248         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9249         acrtc->dm_irq_params.vrr_params = vrr_params;
9250         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9251 }
9252
9253 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9254                                             struct dm_crtc_state *new_state)
9255 {
9256         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9257         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9258
9259         if (!old_vrr_active && new_vrr_active) {
9260                 /* Transition VRR inactive -> active:
9261                  * While VRR is active, we must not disable the vblank irq, as
9262                  * a re-enable after a disable would compute bogus vblank/pflip
9263                  * timestamps if the disable happened inside the front porch.
9264                  *
9265                  * We also need the vupdate irq for the actual core vblank
9266                  * handling at the end of vblank.
9267                  */
9268                 dm_set_vupdate_irq(new_state->base.crtc, true);
9269                 drm_crtc_vblank_get(new_state->base.crtc);
9270                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9271                                  __func__, new_state->base.crtc->base.id);
9272         } else if (old_vrr_active && !new_vrr_active) {
9273                 /* Transition VRR active -> inactive:
9274                  * Allow vblank irq disable again for fixed refresh rate.
9275                  */
9276                 dm_set_vupdate_irq(new_state->base.crtc, false);
9277                 drm_crtc_vblank_put(new_state->base.crtc);
9278                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9279                                  __func__, new_state->base.crtc->base.id);
9280         }
9281 }
9282
9283 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9284 {
9285         struct drm_plane *plane;
9286         struct drm_plane_state *old_plane_state;
9287         int i;
9288
9289         /*
9290          * TODO: Make this per-stream so we don't issue redundant updates for
9291          * commits with multiple streams.
9292          */
9293         for_each_old_plane_in_state(state, plane, old_plane_state, i)
9294                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9295                         handle_cursor_update(plane, old_plane_state);
9296 }
9297
9298 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9299                                     struct dc_state *dc_state,
9300                                     struct drm_device *dev,
9301                                     struct amdgpu_display_manager *dm,
9302                                     struct drm_crtc *pcrtc,
9303                                     bool wait_for_vblank)
9304 {
9305         uint32_t i;
9306         uint64_t timestamp_ns;
9307         struct drm_plane *plane;
9308         struct drm_plane_state *old_plane_state, *new_plane_state;
9309         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9310         struct drm_crtc_state *new_pcrtc_state =
9311                         drm_atomic_get_new_crtc_state(state, pcrtc);
9312         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9313         struct dm_crtc_state *dm_old_crtc_state =
9314                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9315         int planes_count = 0, vpos, hpos;
9316         unsigned long flags;
9317         uint32_t target_vblank, last_flip_vblank;
9318         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9319         bool pflip_present = false;
9320         struct {
9321                 struct dc_surface_update surface_updates[MAX_SURFACES];
9322                 struct dc_plane_info plane_infos[MAX_SURFACES];
9323                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9324                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9325                 struct dc_stream_update stream_update;
9326         } *bundle;
9327
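             /*
              * The update bundle, with its MAX_SURFACES-sized arrays, is too
              * large to put on the kernel stack, so allocate it dynamically.
              */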
9328         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9329
9330         if (!bundle) {
9331                 dm_error("Failed to allocate update bundle\n");
9332                 goto cleanup;
9333         }
9334
9335         /*
9336          * Disable the cursor first if we're disabling all the planes.
9337          * It'll remain on the screen after the planes are re-enabled
9338          * if we don't.
9339          */
9340         if (acrtc_state->active_planes == 0)
9341                 amdgpu_dm_commit_cursors(state);
9342
9343         /* update planes when needed */
9344         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9345                 struct drm_crtc *crtc = new_plane_state->crtc;
9346                 struct drm_crtc_state *new_crtc_state;
9347                 struct drm_framebuffer *fb = new_plane_state->fb;
9348                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9349                 bool plane_needs_flip;
9350                 struct dc_plane_state *dc_plane;
9351                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9352
9353                 /* Cursor plane is handled after stream updates */
9354                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9355                         continue;
9356
9357                 if (!fb || !crtc || pcrtc != crtc)
9358                         continue;
9359
9360                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9361                 if (!new_crtc_state->active)
9362                         continue;
9363
9364                 dc_plane = dm_new_plane_state->dc_state;
9365
9366                 bundle->surface_updates[planes_count].surface = dc_plane;
9367                 if (new_pcrtc_state->color_mgmt_changed) {
9368                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9369                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9370                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9371                 }
9372
9373                 fill_dc_scaling_info(dm->adev, new_plane_state,
9374                                      &bundle->scaling_infos[planes_count]);
9375
9376                 bundle->surface_updates[planes_count].scaling_info =
9377                         &bundle->scaling_infos[planes_count];
9378
9379                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9380
9381                 pflip_present = pflip_present || plane_needs_flip;
9382
9383                 if (!plane_needs_flip) {
9384                         planes_count += 1;
9385                         continue;
9386                 }
9387
9388                 fill_dc_plane_info_and_addr(
9389                         dm->adev, new_plane_state,
9390                         afb->tiling_flags,
9391                         &bundle->plane_infos[planes_count],
9392                         &bundle->flip_addrs[planes_count].address,
9393                         afb->tmz_surface, false);
9394
9395                 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9396                                  new_plane_state->plane->index,
9397                                  bundle->plane_infos[planes_count].dcc.enable);
9398
9399                 bundle->surface_updates[planes_count].plane_info =
9400                         &bundle->plane_infos[planes_count];
9401
9402                 fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
9403                                     new_crtc_state,
9404                                     &bundle->flip_addrs[planes_count]);
9405
9406                 /*
9407                  * Only allow immediate flips for fast updates that don't
9408                  * change FB pitch, DCC state, rotation or mirroring.
9409                  */
9410                 bundle->flip_addrs[planes_count].flip_immediate =
9411                         crtc->state->async_flip &&
9412                         acrtc_state->update_type == UPDATE_TYPE_FAST;
9413
9414                 timestamp_ns = ktime_get_ns();
9415                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9416                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9417                 bundle->surface_updates[planes_count].surface = dc_plane;
9418
9419                 if (!bundle->surface_updates[planes_count].surface) {
9420                         DRM_ERROR("No surface for CRTC: id=%d\n",
9421                                         acrtc_attach->crtc_id);
9422                         continue;
9423                 }
9424
9425                 if (plane == pcrtc->primary)
9426                         update_freesync_state_on_stream(
9427                                 dm,
9428                                 acrtc_state,
9429                                 acrtc_state->stream,
9430                                 dc_plane,
9431                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9432
9433                 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9434                                  __func__,
9435                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9436                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9437
9438                 planes_count += 1;
9439
9440         }
9441
9442         if (pflip_present) {
9443                 if (!vrr_active) {
9444                         /* Use old throttling in non-vrr fixed refresh rate mode
9445                          * to keep flip scheduling based on target vblank counts
9446                          * working in a backwards compatible way, e.g., for
9447                          * clients using the GLX_OML_sync_control extension or
9448                          * DRI3/Present extension with defined target_msc.
9449                          */
9450                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9451                 }
9452                 else {
9453                         /* For variable refresh rate mode only:
9454                          * Get vblank of last completed flip to avoid > 1 vrr
9455                          * flips per video frame by use of throttling, but allow
9456                          * flip programming anywhere in the possibly large
9457                          * variable vrr vblank interval for fine-grained flip
9458                          * timing control and more opportunity to avoid stutter
9459                          * on late submission of flips.
9460                          */
9461                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9462                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9463                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9464                 }
9465
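                     /*
                      * wait_for_vblank is used as a 0/1 increment: if any crtc
                      * in this commit requested an async flip it is 0, so the
                      * throttle loop below does not wait an extra vblank.
                      */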
9466                 target_vblank = last_flip_vblank + wait_for_vblank;
9467
9468                 /*
9469                  * Wait until we're out of the vertical blank period before the one
9470                  * targeted by the flip
9471                  */
9472                 while ((acrtc_attach->enabled &&
9473                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9474                                                             0, &vpos, &hpos, NULL,
9475                                                             NULL, &pcrtc->hwmode)
9476                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9477                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9478                         (int)(target_vblank -
9479                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9480                         usleep_range(1000, 1100);
9481                 }
9482
9483                 /*
9484                  * Prepare the flip event for the pageflip interrupt to handle.
9485                  *
9486                  * This only works in the case where we've already turned on the
9487                  * appropriate hardware blocks (eg. HUBP) so in the transition case
9488                  * from 0 -> n planes we have to skip a hardware generated event
9489                  * and rely on sending it from software.
9490                  */
9491                 if (acrtc_attach->base.state->event &&
9492                     acrtc_state->active_planes > 0) {
9493                         drm_crtc_vblank_get(pcrtc);
9494
9495                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9496
9497                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9498                         prepare_flip_isr(acrtc_attach);
9499
9500                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9501                 }
9502
9503                 if (acrtc_state->stream) {
9504                         if (acrtc_state->freesync_vrr_info_changed)
9505                                 bundle->stream_update.vrr_infopacket =
9506                                         &acrtc_state->stream->vrr_infopacket;
9507                 }
9508         }
9509
9510         /* Update the planes if changed or disable if we don't have any. */
9511         if ((planes_count || acrtc_state->active_planes == 0) &&
9512                 acrtc_state->stream) {
9513                 /*
9514                  * If PSR or idle optimizations are enabled then flush out
9515                  * any pending work before hardware programming.
9516                  */
9517                 if (dm->vblank_control_workqueue)
9518                         flush_workqueue(dm->vblank_control_workqueue);
9519
9520                 bundle->stream_update.stream = acrtc_state->stream;
9521                 if (new_pcrtc_state->mode_changed) {
9522                         bundle->stream_update.src = acrtc_state->stream->src;
9523                         bundle->stream_update.dst = acrtc_state->stream->dst;
9524                 }
9525
9526                 if (new_pcrtc_state->color_mgmt_changed) {
9527                         /*
9528                          * TODO: This isn't fully correct since we've actually
9529                          * already modified the stream in place.
9530                          */
9531                         bundle->stream_update.gamut_remap =
9532                                 &acrtc_state->stream->gamut_remap_matrix;
9533                         bundle->stream_update.output_csc_transform =
9534                                 &acrtc_state->stream->csc_color_matrix;
9535                         bundle->stream_update.out_transfer_func =
9536                                 acrtc_state->stream->out_transfer_func;
9537                 }
9538
9539                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9540                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9541                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
9542
9543                 /*
9544                  * If FreeSync state on the stream has changed then we need to
9545                  * re-adjust the min/max bounds now that DC doesn't handle this
9546                  * as part of commit.
9547                  */
9548                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9549                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9550                         dc_stream_adjust_vmin_vmax(
9551                                 dm->dc, acrtc_state->stream,
9552                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9553                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9554                 }
9555                 mutex_lock(&dm->dc_lock);
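                     /* Exit PSR before applying anything heavier than a fast update. */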
9556                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9557                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
9558                         amdgpu_dm_psr_disable(acrtc_state->stream);
9559
9560                 dc_commit_updates_for_stream(dm->dc,
9561                                                      bundle->surface_updates,
9562                                                      planes_count,
9563                                                      acrtc_state->stream,
9564                                                      &bundle->stream_update,
9565                                                      dc_state);
9566
9567                 /*
9568                  * Enable or disable the interrupts on the backend.
9569                  *
9570                  * Most pipes are put into power gating when unused.
9571                  *
9572                  * When power gating is enabled on a pipe we lose the
9573                  * interrupt enablement state when power gating is disabled.
9574                  *
9575                  * So we need to update the IRQ control state in hardware
9576                  * whenever the pipe turns on (since it could be previously
9577                  * power gated) or off (since some pipes can't be power gated
9578                  * on some ASICs).
9579                  */
9580                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9581                         dm_update_pflip_irq_state(drm_to_adev(dev),
9582                                                   acrtc_attach);
9583
9584                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9585                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9586                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9587                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
9588
9589                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9590                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9591                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9592                         struct amdgpu_dm_connector *aconn =
9593                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9594
9595                         if (aconn->psr_skip_count > 0)
9596                                 aconn->psr_skip_count--;
9597
9598                         /* Allow PSR when skip count is 0. */
9599                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9600
9601                         /*
9602                          * If sink supports PSR SU, there is no need to rely on
9603                          * a vblank event disable request to enable PSR. PSR SU
9604                          * can be enabled immediately once OS demonstrates an
9605                          * adequate number of fast atomic commits to notify KMD
9606                          * of update events. See `vblank_control_worker()`.
9607                          */
9608                         if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
9609                             acrtc_attach->dm_irq_params.allow_psr_entry &&
9610                             !acrtc_state->stream->link->psr_settings.psr_allow_active)
9611                                 amdgpu_dm_psr_enable(acrtc_state->stream);
9612                 } else {
9613                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
9614                 }
9615
9616                 mutex_unlock(&dm->dc_lock);
9617         }
9618
9619         /*
9620          * Update cursor state *after* programming all the planes.
9621          * This avoids redundant programming when we're disabling a single
9622          * plane, since those pipes are being disabled anyway.
9623          */
9624         if (acrtc_state->active_planes)
9625                 amdgpu_dm_commit_cursors(state);
9626
9627 cleanup:
9628         kfree(bundle);
9629 }
9630
9631 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9632                                    struct drm_atomic_state *state)
9633 {
9634         struct amdgpu_device *adev = drm_to_adev(dev);
9635         struct amdgpu_dm_connector *aconnector;
9636         struct drm_connector *connector;
9637         struct drm_connector_state *old_con_state, *new_con_state;
9638         struct drm_crtc_state *new_crtc_state;
9639         struct dm_crtc_state *new_dm_crtc_state;
9640         const struct dc_stream_status *status;
9641         int i, inst;
9642
9643         /* Notify audio device removals. */
9644         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9645                 if (old_con_state->crtc != new_con_state->crtc) {
9646                         /* CRTC changes require notification. */
9647                         goto notify;
9648                 }
9649
9650                 if (!new_con_state->crtc)
9651                         continue;
9652
9653                 new_crtc_state = drm_atomic_get_new_crtc_state(
9654                         state, new_con_state->crtc);
9655
9656                 if (!new_crtc_state)
9657                         continue;
9658
9659                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9660                         continue;
9661
9662         notify:
9663                 aconnector = to_amdgpu_dm_connector(connector);
9664
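                     /*
                      * Clear the connector's audio instance under the audio
                      * lock, then notify with the old instance so the audio
                      * component sees the device as removed.
                      */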
9665                 mutex_lock(&adev->dm.audio_lock);
9666                 inst = aconnector->audio_inst;
9667                 aconnector->audio_inst = -1;
9668                 mutex_unlock(&adev->dm.audio_lock);
9669
9670                 amdgpu_dm_audio_eld_notify(adev, inst);
9671         }
9672
9673         /* Notify audio device additions. */
9674         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9675                 if (!new_con_state->crtc)
9676                         continue;
9677
9678                 new_crtc_state = drm_atomic_get_new_crtc_state(
9679                         state, new_con_state->crtc);
9680
9681                 if (!new_crtc_state)
9682                         continue;
9683
9684                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9685                         continue;
9686
9687                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9688                 if (!new_dm_crtc_state->stream)
9689                         continue;
9690
9691                 status = dc_stream_get_status(new_dm_crtc_state->stream);
9692                 if (!status)
9693                         continue;
9694
9695                 aconnector = to_amdgpu_dm_connector(connector);
9696
9697                 mutex_lock(&adev->dm.audio_lock);
9698                 inst = status->audio_inst;
9699                 aconnector->audio_inst = inst;
9700                 mutex_unlock(&adev->dm.audio_lock);
9701
9702                 amdgpu_dm_audio_eld_notify(adev, inst);
9703         }
9704 }
9705
9706 /**
9707  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9708  * @crtc_state: the DRM CRTC state
9709  * @stream_state: the DC stream state.
9710  *
9711  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9712  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9713  */
9714 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9715                                                 struct dc_stream_state *stream_state)
9716 {
9717         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9718 }
9719
9720 /**
9721  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9722  * @state: The atomic state to commit
9723  *
9724  * This will tell DC to commit the constructed DC state from atomic_check,
9725  * programming the hardware. Any failure here implies a hardware failure, since
9726  * atomic check should have filtered anything non-kosher.
9727  */
9728 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9729 {
9730         struct drm_device *dev = state->dev;
9731         struct amdgpu_device *adev = drm_to_adev(dev);
9732         struct amdgpu_display_manager *dm = &adev->dm;
9733         struct dm_atomic_state *dm_state;
9734         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9735         uint32_t i, j;
9736         struct drm_crtc *crtc;
9737         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9738         unsigned long flags;
9739         bool wait_for_vblank = true;
9740         struct drm_connector *connector;
9741         struct drm_connector_state *old_con_state, *new_con_state;
9742         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9743         int crtc_disable_count = 0;
9744         bool mode_set_reset_required = false;
9745         int r;
9746
9747         trace_amdgpu_dm_atomic_commit_tail_begin(state);
9748
9749         r = drm_atomic_helper_wait_for_fences(dev, state, false);
9750         if (unlikely(r))
9751                 DRM_ERROR("Waiting for fences timed out!\n");
9752
9753         drm_atomic_helper_update_legacy_modeset_state(dev, state);
9754
9755         dm_state = dm_atomic_get_new_state(state);
9756         if (dm_state && dm_state->context) {
9757                 dc_state = dm_state->context;
9758         } else {
9759                 /* No state changes, retain current state. */
9760                 dc_state_temp = dc_create_state(dm->dc);
9761                 ASSERT(dc_state_temp);
9762                 dc_state = dc_state_temp;
9763                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9764         }
9765
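             /*
              * Disable interrupts and drop the stream reference for any CRTC
              * that is being disabled or is about to go through a modeset,
              * before the hardware is reprogrammed below.
              */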
9766         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9767                                        new_crtc_state, i) {
9768                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9769
9770                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9771
9772                 if (old_crtc_state->active &&
9773                     (!new_crtc_state->active ||
9774                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9775                         manage_dm_interrupts(adev, acrtc, false);
9776                         dc_stream_release(dm_old_crtc_state->stream);
9777                 }
9778         }
9779
9780         drm_atomic_helper_calc_timestamping_constants(state);
9781
9782         /* update changed items */
9783         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9784                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9785
9786                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9787                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9788
9789                 drm_dbg_state(state->dev,
9790                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9791                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9792                         "connectors_changed:%d\n",
9793                         acrtc->crtc_id,
9794                         new_crtc_state->enable,
9795                         new_crtc_state->active,
9796                         new_crtc_state->planes_changed,
9797                         new_crtc_state->mode_changed,
9798                         new_crtc_state->active_changed,
9799                         new_crtc_state->connectors_changed);
9800
9801                 /* Disable cursor if disabling crtc */
9802                 if (old_crtc_state->active && !new_crtc_state->active) {
9803                         struct dc_cursor_position position;
9804
9805                         memset(&position, 0, sizeof(position));
9806                         mutex_lock(&dm->dc_lock);
9807                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9808                         mutex_unlock(&dm->dc_lock);
9809                 }
9810
9811                 /* Copy all transient state flags into dc state */
9812                 if (dm_new_crtc_state->stream) {
9813                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9814                                                             dm_new_crtc_state->stream);
9815                 }
9816
9817                 /* handles headless hotplug case, updating new_state and
9818                  * aconnector as needed
9819                  */
9820
9821                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9822
9823                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9824
9825                         if (!dm_new_crtc_state->stream) {
9826                                 /*
9827                                  * This could happen because of issues with the
9828                                  * delivery of userspace notifications.
9829                                  * In this case userspace tries to set a mode on
9830                                  * a display which is in fact disconnected, and
9831                                  * dc_sink is NULL on the aconnector.
9832                                  * We expect a mode reset to come soon.
9833                                  *
9834                                  * This can also happen when an unplug occurs
9835                                  * during the resume sequence.
9836                                  *
9837                                  * In either case, we want to pretend we still
9838                                  * have a sink to keep the pipe running so that
9839                                  * hw state stays consistent with the sw state.
9840                                  */
9841                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9842                                                 __func__, acrtc->base.base.id);
9843                                 continue;
9844                         }
9845
9846                         if (dm_old_crtc_state->stream)
9847                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9848
9849                         pm_runtime_get_noresume(dev->dev);
9850
9851                         acrtc->enabled = true;
9852                         acrtc->hw_mode = new_crtc_state->mode;
9853                         crtc->hwmode = new_crtc_state->mode;
9854                         mode_set_reset_required = true;
9855                 } else if (modereset_required(new_crtc_state)) {
9856                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9857                         /* i.e. reset mode */
9858                         if (dm_old_crtc_state->stream)
9859                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9860
9861                         mode_set_reset_required = true;
9862                 }
9863         } /* for_each_crtc_in_state() */
9864
9865         if (dc_state) {
9866                 /* If there is a mode set or reset, disable eDP PSR. */
9867                 if (mode_set_reset_required) {
9868                         if (dm->vblank_control_workqueue)
9869                                 flush_workqueue(dm->vblank_control_workqueue);
9870
9871                         amdgpu_dm_psr_disable_all(dm);
9872                 }
9873
9874                 dm_enable_per_frame_crtc_master_sync(dc_state);
9875                 mutex_lock(&dm->dc_lock);
9876                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9877
9878                 /* Allow idle optimization when vblank count is 0 for display off */
9879                 if (dm->active_vblank_irq_count == 0)
9880                         dc_allow_idle_optimizations(dm->dc, true);
9881                 mutex_unlock(&dm->dc_lock);
9882         }
9883
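             /*
              * Now that DC has committed the new state, record which OTG
              * instance each stream landed on; the IRQ handlers use it to
              * map interrupts back to the right amdgpu_crtc.
              */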
9884         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9885                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9886
9887                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9888
9889                 if (dm_new_crtc_state->stream != NULL) {
9890                         const struct dc_stream_status *status =
9891                                         dc_stream_get_status(dm_new_crtc_state->stream);
9892
9893                         if (!status)
9894                                 status = dc_stream_get_status_from_state(dc_state,
9895                                                                          dm_new_crtc_state->stream);
9896                         if (!status)
9897                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9898                         else
9899                                 acrtc->otg_inst = status->primary_otg_inst;
9900                 }
9901         }
9902 #ifdef CONFIG_DRM_AMD_DC_HDCP
9903         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9904                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9905                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9906                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9907
9908                 new_crtc_state = NULL;
9909
9910                 if (acrtc)
9911                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9912
9913                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9914
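                     /*
                      * The stream is gone but userspace still wants content
                      * protection ENABLED: reset HDCP on the link and fall
                      * back to DESIRED so it is re-enabled once a new stream
                      * comes up.
                      */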
9915                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9916                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9917                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9918                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9919                         dm_new_con_state->update_hdcp = true;
9920                         continue;
9921                 }
9922
9923                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9924                         hdcp_update_display(
9925                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9926                                 new_con_state->hdcp_content_type,
9927                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9928         }
9929 #endif
9930
9931         /* Handle connector state changes */
9932         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9933                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9934                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9935                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9936                 struct dc_surface_update dummy_updates[MAX_SURFACES];
9937                 struct dc_stream_update stream_update;
9938                 struct dc_info_packet hdr_packet;
9939                 struct dc_stream_status *status = NULL;
9940                 bool abm_changed, hdr_changed, scaling_changed;
9941
9942                 memset(&dummy_updates, 0, sizeof(dummy_updates));
9943                 memset(&stream_update, 0, sizeof(stream_update));
9944
9945                 if (acrtc) {
9946                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9947                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9948                 }
9949
9950                 /* Skip any modesets/resets */
9951                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9952                         continue;
9953
9954                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9955                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9956
9957                 scaling_changed = is_scaling_state_different(dm_new_con_state,
9958                                                              dm_old_con_state);
9959
9960                 abm_changed = dm_new_crtc_state->abm_level !=
9961                               dm_old_crtc_state->abm_level;
9962
9963                 hdr_changed =
9964                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9965
9966                 if (!scaling_changed && !abm_changed && !hdr_changed)
9967                         continue;
9968
9969                 stream_update.stream = dm_new_crtc_state->stream;
9970                 if (scaling_changed) {
9971                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9972                                         dm_new_con_state, dm_new_crtc_state->stream);
9973
9974                         stream_update.src = dm_new_crtc_state->stream->src;
9975                         stream_update.dst = dm_new_crtc_state->stream->dst;
9976                 }
9977
9978                 if (abm_changed) {
9979                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9980
9981                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9982                 }
9983
9984                 if (hdr_changed) {
9985                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9986                         stream_update.hdr_static_metadata = &hdr_packet;
9987                 }
9988
9989                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9990
9991                 if (WARN_ON(!status))
9992                         continue;
9993
9994                 WARN_ON(!status->plane_count);
9995
9996                 /*
9997                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9998                  * Here we create an empty update on each plane.
9999                  * To fix this, DC should permit updating only stream properties.
10000                  */
10001                 for (j = 0; j < status->plane_count; j++)
10002                         dummy_updates[j].surface = status->plane_states[0];
10003
10004
10005                 mutex_lock(&dm->dc_lock);
10006                 dc_commit_updates_for_stream(dm->dc,
10007                                                      dummy_updates,
10008                                                      status->plane_count,
10009                                                      dm_new_crtc_state->stream,
10010                                                      &stream_update,
10011                                                      dc_state);
10012                 mutex_unlock(&dm->dc_lock);
10013         }
10014
10015         /* Count number of newly disabled CRTCs for dropping PM refs later. */
10016         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
10017                                       new_crtc_state, i) {
10018                 if (old_crtc_state->active && !new_crtc_state->active)
10019                         crtc_disable_count++;
10020
10021                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10022                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10023
10024                 /* For freesync config update on crtc state and params for irq */
10025                 update_stream_irq_parameters(dm, dm_new_crtc_state);
10026
10027                 /* Handle vrr on->off / off->on transitions */
10028                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
10029                                                 dm_new_crtc_state);
10030         }
10031
10032         /*
10033          * Enable interrupts for CRTCs that are newly enabled or went through
10034          * a modeset. This is intentionally deferred until after the front end
10035          * state has been modified, so that the OTG is on and the IRQ
10036          * handlers don't access stale or invalid state.
10037          */
10038         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10039                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10040 #ifdef CONFIG_DEBUG_FS
10041                 bool configure_crc = false;
10042                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
10043 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10044                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
10045 #endif
10046                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10047                 cur_crc_src = acrtc->dm_irq_params.crc_src;
10048                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10049 #endif
10050                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10051
10052                 if (new_crtc_state->active &&
10053                     (!old_crtc_state->active ||
10054                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
10055                         dc_stream_retain(dm_new_crtc_state->stream);
10056                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
10057                         manage_dm_interrupts(adev, acrtc, true);
10058
10059 #ifdef CONFIG_DEBUG_FS
10060                         /*
10061                          * Frontend may have changed so reapply the CRC capture
10062                          * settings for the stream.
10063                          */
10064                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10065
10066                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
10067                                 configure_crc = true;
10068 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10069                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
10070                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10071                                         acrtc->dm_irq_params.crc_window.update_win = true;
10072                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
10073                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
10074                                         crc_rd_wrk->crtc = crtc;
10075                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
10076                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10077                                 }
10078 #endif
10079                         }
10080
10081                         if (configure_crc)
10082                                 if (amdgpu_dm_crtc_configure_crc_source(
10083                                         crtc, dm_new_crtc_state, cur_crc_src))
10084                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
10085 #endif
10086                 }
10087         }
10088
10089         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
10090                 if (new_crtc_state->async_flip)
10091                         wait_for_vblank = false;
10092
10093         /* update planes when needed per crtc */
10094         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
10095                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10096
10097                 if (dm_new_crtc_state->stream)
10098                         amdgpu_dm_commit_planes(state, dc_state, dev,
10099                                                 dm, crtc, wait_for_vblank);
10100         }
10101
10102         /* Update audio instances for each connector. */
10103         amdgpu_dm_commit_audio(dev, state);
10104
10105         /* restore the backlight level */
10106         for (i = 0; i < dm->num_of_edps; i++) {
10107                 if (dm->backlight_dev[i] &&
10108                     (dm->actual_brightness[i] != dm->brightness[i]))
10109                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
10110         }
10111
10112         /*
10113          * Send the vblank event for all events not handled in flip and
10114          * mark the consumed event for drm_atomic_helper_commit_hw_done.
10115          */
10116         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10117         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10118
10119                 if (new_crtc_state->event)
10120                         drm_send_event_locked(dev, &new_crtc_state->event->base);
10121
10122                 new_crtc_state->event = NULL;
10123         }
10124         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10125
10126         /* Signal HW programming completion */
10127         drm_atomic_helper_commit_hw_done(state);
10128
10129         if (wait_for_vblank)
10130                 drm_atomic_helper_wait_for_flip_done(dev, state);
10131
10132         drm_atomic_helper_cleanup_planes(dev, state);
10133
10134         /* return the stolen vga memory back to VRAM */
10135         if (!adev->mman.keep_stolen_vga_memory)
10136                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
10137         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
10138
10139         /*
10140          * Finally, drop a runtime PM reference for each newly disabled CRTC,
10141          * so we can put the GPU into runtime suspend if we're not driving any
10142          * displays anymore
10143          */
10144         for (i = 0; i < crtc_disable_count; i++)
10145                 pm_runtime_put_autosuspend(dev->dev);
10146         pm_runtime_mark_last_busy(dev->dev);
10147
10148         if (dc_state_temp)
10149                 dc_release_state(dc_state_temp);
10150 }
10151
10152
10153 static int dm_force_atomic_commit(struct drm_connector *connector)
10154 {
10155         int ret = 0;
10156         struct drm_device *ddev = connector->dev;
10157         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10158         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10159         struct drm_plane *plane = disconnected_acrtc->base.primary;
10160         struct drm_connector_state *conn_state;
10161         struct drm_crtc_state *crtc_state;
10162         struct drm_plane_state *plane_state;
10163
10164         if (!state)
10165                 return -ENOMEM;
10166
10167         state->acquire_ctx = ddev->mode_config.acquire_ctx;
10168
10169         /* Construct an atomic state to restore previous display setting */
10170
10171         /*
10172          * Attach connectors to drm_atomic_state
10173          */
10174         conn_state = drm_atomic_get_connector_state(state, connector);
10175
10176         ret = PTR_ERR_OR_ZERO(conn_state);
10177         if (ret)
10178                 goto out;
10179
10180         /* Attach crtc to drm_atomic_state*/
10181         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10182
10183         ret = PTR_ERR_OR_ZERO(crtc_state);
10184         if (ret)
10185                 goto out;
10186
10187         /* force a restore */
10188         crtc_state->mode_changed = true;
10189
10190         /* Attach plane to drm_atomic_state */
10191         plane_state = drm_atomic_get_plane_state(state, plane);
10192
10193         ret = PTR_ERR_OR_ZERO(plane_state);
10194         if (ret)
10195                 goto out;
10196
10197         /* Call commit internally with the state we just constructed */
10198         ret = drm_atomic_commit(state);
10199
10200 out:
10201         drm_atomic_state_put(state);
10202         if (ret)
10203                 DRM_ERROR("Restoring old state failed with %i\n", ret);
10204
10205         return ret;
10206 }
10207
10208 /*
10209  * This function handles all cases when a set mode does not come upon hotplug.
10210  * This includes when a display is unplugged then plugged back into the
10211  * same port, and when running without usermode desktop manager support.
10212  */
10213 void dm_restore_drm_connector_state(struct drm_device *dev,
10214                                     struct drm_connector *connector)
10215 {
10216         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10217         struct amdgpu_crtc *disconnected_acrtc;
10218         struct dm_crtc_state *acrtc_state;
10219
10220         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10221                 return;
10222
10223         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10224         if (!disconnected_acrtc)
10225                 return;
10226
10227         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10228         if (!acrtc_state->stream)
10229                 return;
10230
10231         /*
10232          * If the previous sink is not released and is different from the
10233          * current one, we deduce we are in a state where we cannot rely on a
10234          * usermode call to turn on the display, so we do it here.
10235          */
10236         if (acrtc_state->stream->sink != aconnector->dc_sink)
10237                 dm_force_atomic_commit(&aconnector->base);
10238 }
10239
10240 /*
10241  * Grabs all modesetting locks to serialize against any blocking commits, and
10242  * waits for completion of all non-blocking commits.
10243  */
10244 static int do_aquire_global_lock(struct drm_device *dev,
10245                                  struct drm_atomic_state *state)
10246 {
10247         struct drm_crtc *crtc;
10248         struct drm_crtc_commit *commit;
10249         long ret;
10250
10251         /*
10252          * Adding all modeset locks to acquire_ctx will
10253          * ensure that when the framework releases it, the
10254          * extra locks we take here will also get released.
10255          */
10256         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10257         if (ret)
10258                 return ret;
10259
10260         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10261                 spin_lock(&crtc->commit_lock);
10262                 commit = list_first_entry_or_null(&crtc->commit_list,
10263                                 struct drm_crtc_commit, commit_entry);
10264                 if (commit)
10265                         drm_crtc_commit_get(commit);
10266                 spin_unlock(&crtc->commit_lock);
10267
10268                 if (!commit)
10269                         continue;
10270
10271                 /*
10272                  * Make sure all pending HW programming has completed and
10273                  * all page flips are done.
10274                  */
10275                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10276
10277                 if (ret > 0)
10278                         ret = wait_for_completion_interruptible_timeout(
10279                                         &commit->flip_done, 10*HZ);
10280
10281                 if (ret == 0)
10282                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10283                                   "timed out\n", crtc->base.id, crtc->name);
10284
10285                 drm_crtc_commit_put(commit);
10286         }
10287
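              /*
               * The interruptible waits return a negative value on signal
               * delivery; a timeout (ret == 0) was already logged above and
               * is treated as success here.
               */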
10288         return ret < 0 ? ret : 0;
10289 }
10290
10291 static void get_freesync_config_for_crtc(
10292         struct dm_crtc_state *new_crtc_state,
10293         struct dm_connector_state *new_con_state)
10294 {
10295         struct mod_freesync_config config = {0};
10296         struct amdgpu_dm_connector *aconnector =
10297                         to_amdgpu_dm_connector(new_con_state->base.connector);
10298         struct drm_display_mode *mode = &new_crtc_state->base.mode;
10299         int vrefresh = drm_mode_vrefresh(mode);
10300         bool fs_vid_mode = false;
10301
10302         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10303                                         vrefresh >= aconnector->min_vfreq &&
10304                                         vrefresh <= aconnector->max_vfreq;
10305
10306         if (new_crtc_state->vrr_supported) {
10307                 new_crtc_state->stream->ignore_msa_timing_param = true;
10308                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10309
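                      /*
                       * aconnector->min/max_vfreq are in Hz; DC expects the
                       * refresh range in micro-Hz, hence the 1000000 scaling.
                       */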
10310                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10311                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10312                 config.vsif_supported = true;
10313                 config.btr = true;
10314
10315                 if (fs_vid_mode) {
10316                         config.state = VRR_STATE_ACTIVE_FIXED;
10317                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10318                         goto out;
10319                 } else if (new_crtc_state->base.vrr_enabled) {
10320                         config.state = VRR_STATE_ACTIVE_VARIABLE;
10321                 } else {
10322                         config.state = VRR_STATE_INACTIVE;
10323                 }
10324         }
10325 out:
10326         new_crtc_state->freesync_config = config;
10327 }
10328
10329 static void reset_freesync_config_for_crtc(
10330         struct dm_crtc_state *new_crtc_state)
10331 {
10332         new_crtc_state->vrr_supported = false;
10333
10334         memset(&new_crtc_state->vrr_infopacket, 0,
10335                sizeof(new_crtc_state->vrr_infopacket));
10336 }
10337
10338 static bool
10339 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10340                                  struct drm_crtc_state *new_crtc_state)
10341 {
10342         const struct drm_display_mode *old_mode, *new_mode;
10343
10344         if (!old_crtc_state || !new_crtc_state)
10345                 return false;
10346
10347         old_mode = &old_crtc_state->mode;
10348         new_mode = &new_crtc_state->mode;
10349
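              /*
               * "Unchanged" for freesync purposes: same pixel clock, same
               * horizontal timing and active sizes, same vsync pulse width,
               * but a different vtotal / vsync placement - i.e. the same base
               * mode refreshed at a different rate via the vertical blank.
               */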
10350         if (old_mode->clock       == new_mode->clock &&
10351             old_mode->hdisplay    == new_mode->hdisplay &&
10352             old_mode->vdisplay    == new_mode->vdisplay &&
10353             old_mode->htotal      == new_mode->htotal &&
10354             old_mode->vtotal      != new_mode->vtotal &&
10355             old_mode->hsync_start == new_mode->hsync_start &&
10356             old_mode->vsync_start != new_mode->vsync_start &&
10357             old_mode->hsync_end   == new_mode->hsync_end &&
10358             old_mode->vsync_end   != new_mode->vsync_end &&
10359             old_mode->hskew       == new_mode->hskew &&
10360             old_mode->vscan       == new_mode->vscan &&
10361             (old_mode->vsync_end - old_mode->vsync_start) ==
10362             (new_mode->vsync_end - new_mode->vsync_start))
10363                 return true;
10364
10365         return false;
10366 }
10367
10368 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10369         uint64_t num, den, res;
10370         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10371
10372         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10373
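              /*
               * Refresh rate in micro-Hz = pixel clock (Hz) * 1000000 /
               * (htotal * vtotal); mode.clock is in kHz, hence the extra
               * factor of 1000. For example, a 148500 kHz clock with
               * htotal 2200 and vtotal 1125 yields 60000000 uHz (60 Hz).
               */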
10374         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10375         den = (unsigned long long)new_crtc_state->mode.htotal *
10376               (unsigned long long)new_crtc_state->mode.vtotal;
10377
10378         res = div_u64(num, den);
10379         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10380 }
10381
10382 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10383                          struct drm_atomic_state *state,
10384                          struct drm_crtc *crtc,
10385                          struct drm_crtc_state *old_crtc_state,
10386                          struct drm_crtc_state *new_crtc_state,
10387                          bool enable,
10388                          bool *lock_and_validation_needed)
10389 {
10390         struct dm_atomic_state *dm_state = NULL;
10391         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10392         struct dc_stream_state *new_stream;
10393         int ret = 0;
10394
10395         /*
10396          * TODO: Move this code into dm_crtc_atomic_check once we get rid of
10397          * dc_validation_set, and update changed items there.
10398          */
10399         struct amdgpu_crtc *acrtc = NULL;
10400         struct amdgpu_dm_connector *aconnector = NULL;
10401         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10402         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10403
10404         new_stream = NULL;
10405
10406         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10407         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10408         acrtc = to_amdgpu_crtc(crtc);
10409         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10410
10411         /* TODO This hack should go away */
10412         if (aconnector && enable) {
10413                 /* Make sure fake sink is created in plug-in scenario */
10414                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10415                                                             &aconnector->base);
10416                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10417                                                             &aconnector->base);
10418
10419                 if (IS_ERR(drm_new_conn_state)) {
10420                         ret = PTR_ERR(drm_new_conn_state);
10421                         goto fail;
10422                 }
10423
10424                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10425                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10426
10427                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10428                         goto skip_modeset;
10429
10430                 new_stream = create_validate_stream_for_sink(aconnector,
10431                                                              &new_crtc_state->mode,
10432                                                              dm_new_conn_state,
10433                                                              dm_old_crtc_state->stream);
10434
10435                 /*
10436                  * We can have no stream on ACTION_SET if a display
10437                  * was disconnected during S3. In this case it is not an
10438                  * error: the OS will be updated after detection and
10439                  * will do the right thing on the next atomic commit.
10440                  */
10441
10442                 if (!new_stream) {
10443                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10444                                         __func__, acrtc->base.base.id);
10445                         ret = -ENOMEM;
10446                         goto fail;
10447                 }
10448
10449                 /*
10450                  * TODO: Check VSDB bits to decide whether this should
10451                  * be enabled or not.
10452                  */
10453                 new_stream->triggered_crtc_reset.enabled =
10454                         dm->force_timing_sync;
10455
10456                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10457
10458                 ret = fill_hdr_info_packet(drm_new_conn_state,
10459                                            &new_stream->hdr_static_metadata);
10460                 if (ret)
10461                         goto fail;
10462
10463                 /*
10464                  * If we already removed the old stream from the context
10465                  * (and set the new stream to NULL) then we can't reuse
10466                  * the old stream even if the stream and scaling are unchanged.
10467                  * We'll hit the BUG_ON below and end up with a black screen.
10468                  *
10469                  * TODO: Refactor this function to allow this check to work
10470                  * in all conditions.
10471                  */
10472                 if (dm_new_crtc_state->stream &&
10473                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10474                         goto skip_modeset;
10475
10476                 if (dm_new_crtc_state->stream &&
10477                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10478                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10479                         new_crtc_state->mode_changed = false;
10480                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
10481                                          new_crtc_state->mode_changed);
10482                 }
10483         }
10484
10485         /* mode_changed flag may get updated above, need to check again */
10486         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10487                 goto skip_modeset;
10488
10489         drm_dbg_state(state->dev,
10490                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10491                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
10492                 "connectors_changed:%d\n",
10493                 acrtc->crtc_id,
10494                 new_crtc_state->enable,
10495                 new_crtc_state->active,
10496                 new_crtc_state->planes_changed,
10497                 new_crtc_state->mode_changed,
10498                 new_crtc_state->active_changed,
10499                 new_crtc_state->connectors_changed);
10500
10501         /* Remove stream for any changed/disabled CRTC */
10502         if (!enable) {
10504                 if (!dm_old_crtc_state->stream)
10505                         goto skip_modeset;
10506
10507                 if (dm_new_crtc_state->stream &&
10508                     is_timing_unchanged_for_freesync(new_crtc_state,
10509                                                      old_crtc_state)) {
10510                         new_crtc_state->mode_changed = false;
10511                         DRM_DEBUG_DRIVER(
10512                                 "Mode change not required for front porch change, "
10513                                 "setting mode_changed to %d\n",
10514                                 new_crtc_state->mode_changed);
10515
10516                         set_freesync_fixed_config(dm_new_crtc_state);
10517
10518                         goto skip_modeset;
10519                 } else if (aconnector &&
10520                            is_freesync_video_mode(&new_crtc_state->mode,
10521                                                   aconnector)) {
10522                         struct drm_display_mode *high_mode;
10523
10524                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
10525                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
10526                                 set_freesync_fixed_config(dm_new_crtc_state);
10528                 }
10529
10530                 ret = dm_atomic_get_state(state, &dm_state);
10531                 if (ret)
10532                         goto fail;
10533
10534                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10535                                 crtc->base.id);
10536
10537                 /* i.e. reset mode */
10538                 if (dc_remove_stream_from_ctx(
10539                                 dm->dc,
10540                                 dm_state->context,
10541                                 dm_old_crtc_state->stream) != DC_OK) {
10542                         ret = -EINVAL;
10543                         goto fail;
10544                 }
10545
10546                 dc_stream_release(dm_old_crtc_state->stream);
10547                 dm_new_crtc_state->stream = NULL;
10548
10549                 reset_freesync_config_for_crtc(dm_new_crtc_state);
10550
10551                 *lock_and_validation_needed = true;
10552
10553         } else { /* Add stream for any updated/enabled CRTC */
10554                 /*
10555                  * Quick fix to prevent a NULL new_stream dereference when newly
10556                  * added MST connectors are not yet found in the existing
10557                  * crtc_state in chained (daisy-chained) mode.
10558                  * TODO: need to dig out the root cause of that
10559                  */
10559                 if (!aconnector)
10560                         goto skip_modeset;
10561
10562                 if (modereset_required(new_crtc_state))
10563                         goto skip_modeset;
10564
10565                 if (modeset_required(new_crtc_state, new_stream,
10566                                      dm_old_crtc_state->stream)) {
10567
10568                         WARN_ON(dm_new_crtc_state->stream);
10569
10570                         ret = dm_atomic_get_state(state, &dm_state);
10571                         if (ret)
10572                                 goto fail;
10573
10574                         dm_new_crtc_state->stream = new_stream;
10575
10576                         dc_stream_retain(new_stream);
10577
10578                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10579                                          crtc->base.id);
10580
10581                         if (dc_add_stream_to_ctx(
10582                                         dm->dc,
10583                                         dm_state->context,
10584                                         dm_new_crtc_state->stream) != DC_OK) {
10585                                 ret = -EINVAL;
10586                                 goto fail;
10587                         }
10588
10589                         *lock_and_validation_needed = true;
10590                 }
10591         }
10592
10593 skip_modeset:
10594         /* Release the reference taken at stream creation */
10595         if (new_stream)
10596                 dc_stream_release(new_stream);
10597
10598         /*
10599          * We want to do dc stream updates that do not require a
10600          * full modeset below.
10601          */
10602         if (!(enable && aconnector && new_crtc_state->active))
10603                 return 0;

10604         /*
10605          * Given above conditions, the dc state cannot be NULL because:
10606          * 1. We're in the process of enabling CRTCs (just been added
10607          *    to the dc context, or already is on the context)
10608          * 2. Has a valid connector attached, and
10609          * 3. Is currently active and enabled.
10610          * => The dc stream state currently exists.
10611          */
10612         BUG_ON(dm_new_crtc_state->stream == NULL);
10613
10614         /* Scaling or underscan settings */
10615         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10616             drm_atomic_crtc_needs_modeset(new_crtc_state))
10617                 update_stream_scaling_settings(
10618                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10619
10620         /* ABM settings */
10621         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10622
10623         /*
10624          * Color management settings. We also update color properties
10625          * when a modeset is needed, to ensure it gets reprogrammed.
10626          */
10627         if (dm_new_crtc_state->base.color_mgmt_changed ||
10628             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10629                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10630                 if (ret)
10631                         goto fail;
10632         }
10633
10634         /* Update Freesync settings. */
10635         get_freesync_config_for_crtc(dm_new_crtc_state,
10636                                      dm_new_conn_state);
10637
10638         return ret;
10639
10640 fail:
10641         if (new_stream)
10642                 dc_stream_release(new_stream);
10643         return ret;
10644 }
10645
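/*
 * Decide whether the DC plane state for @plane must be recreated. A reset
 * is needed not only when the plane itself changes CRTC, but also when any
 * sibling primary/overlay plane on the same CRTC changes in a way that can
 * affect z-order, scaling, blending, color space or bandwidth, since the
 * current DC architecture acquires pipes for the whole stream at once.
 */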
10646 static bool should_reset_plane(struct drm_atomic_state *state,
10647                                struct drm_plane *plane,
10648                                struct drm_plane_state *old_plane_state,
10649                                struct drm_plane_state *new_plane_state)
10650 {
10651         struct drm_plane *other;
10652         struct drm_plane_state *old_other_state, *new_other_state;
10653         struct drm_crtc_state *new_crtc_state;
10654         int i;
10655
10656         /*
10657          * TODO: Remove this hack once the checks below are sufficient
10658          * to determine when we need to reset all the planes on
10659          * the stream.
10660          */
10661         if (state->allow_modeset)
10662                 return true;
10663
10664         /* Exit early if we know that we're adding or removing the plane. */
10665         if (old_plane_state->crtc != new_plane_state->crtc)
10666                 return true;
10667
10668         /* old crtc == new crtc == NULL, plane not in context. */
10669         if (!new_plane_state->crtc)
10670                 return false;
10671
10672         new_crtc_state =
10673                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10674
10675         if (!new_crtc_state)
10676                 return true;
10677
10678         /* CRTC Degamma changes currently require us to recreate planes. */
10679         if (new_crtc_state->color_mgmt_changed)
10680                 return true;
10681
10682         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10683                 return true;
10684
10685         /*
10686          * If there are any new primary or overlay planes being added or
10687          * removed then the z-order can potentially change. To ensure
10688          * correct z-order and pipe acquisition the current DC architecture
10689          * requires us to remove and recreate all existing planes.
10690          *
10691          * TODO: Come up with a more elegant solution for this.
10692          */
10693         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10694                 struct amdgpu_framebuffer *old_afb, *new_afb;

10695                 if (other->type == DRM_PLANE_TYPE_CURSOR)
10696                         continue;
10697
10698                 if (old_other_state->crtc != new_plane_state->crtc &&
10699                     new_other_state->crtc != new_plane_state->crtc)
10700                         continue;
10701
10702                 if (old_other_state->crtc != new_other_state->crtc)
10703                         return true;
10704
10705                 /* Src/dst size and scaling updates. */
10706                 if (old_other_state->src_w != new_other_state->src_w ||
10707                     old_other_state->src_h != new_other_state->src_h ||
10708                     old_other_state->crtc_w != new_other_state->crtc_w ||
10709                     old_other_state->crtc_h != new_other_state->crtc_h)
10710                         return true;
10711
10712                 /* Rotation / mirroring updates. */
10713                 if (old_other_state->rotation != new_other_state->rotation)
10714                         return true;
10715
10716                 /* Blending updates. */
10717                 if (old_other_state->pixel_blend_mode !=
10718                     new_other_state->pixel_blend_mode)
10719                         return true;
10720
10721                 /* Alpha updates. */
10722                 if (old_other_state->alpha != new_other_state->alpha)
10723                         return true;
10724
10725                 /* Colorspace changes. */
10726                 if (old_other_state->color_range != new_other_state->color_range ||
10727                     old_other_state->color_encoding != new_other_state->color_encoding)
10728                         return true;
10729
10730                 /* Framebuffer checks come last. */
10731                 if (!old_other_state->fb || !new_other_state->fb)
10732                         continue;
10733
10734                 /* Pixel format changes can require bandwidth updates. */
10735                 if (old_other_state->fb->format != new_other_state->fb->format)
10736                         return true;
10737
10738                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10739                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10740
10741                 /* Tiling and DCC changes also require bandwidth updates. */
10742                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10743                     old_afb->base.modifier != new_afb->base.modifier)
10744                         return true;
10745         }
10746
10747         return false;
10748 }
10749
10750 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10751                               struct drm_plane_state *new_plane_state,
10752                               struct drm_framebuffer *fb)
10753 {
10754         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10755         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10756         unsigned int pitch;
10757         bool linear;
10758
10759         if (fb->width > new_acrtc->max_cursor_width ||
10760             fb->height > new_acrtc->max_cursor_height) {
10761                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10762                                  fb->width, fb->height);
10764                 return -EINVAL;
10765         }
10766         if (new_plane_state->src_w != fb->width << 16 ||
10767             new_plane_state->src_h != fb->height << 16) {
10768                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10769                 return -EINVAL;
10770         }
10771
10772         /* Pitch in pixels */
10773         pitch = fb->pitches[0] / fb->format->cpp[0];
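        /*
         * E.g. a 64x64 ARGB8888 cursor FB typically has pitches[0] = 256
         * bytes and cpp[0] = 4 bytes per pixel, giving a pitch of 64 pixels.
         */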
10774
10775         if (fb->width != pitch) {
10776                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10777                                  fb->width, pitch);
10778                 return -EINVAL;
10779         }
10780
10781         switch (pitch) {
10782         case 64:
10783         case 128:
10784         case 256:
10785                 /* FB pitch is supported by cursor plane */
10786                 break;
10787         default:
10788                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10789                 return -EINVAL;
10790         }
10791
10792         /* Core DRM takes care of checking FB modifiers, so we only need to
10793          * check tiling flags when the FB doesn't have a modifier. */
10794         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10795                 if (adev->family < AMDGPU_FAMILY_AI) {
10796                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10797                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10798                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10799                 } else {
10800                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10801                 }
10802                 if (!linear) {
10803                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10804                         return -EINVAL;
10805                 }
10806         }
10807
10808         return 0;
10809 }
10810
10811 static int dm_update_plane_state(struct dc *dc,
10812                                  struct drm_atomic_state *state,
10813                                  struct drm_plane *plane,
10814                                  struct drm_plane_state *old_plane_state,
10815                                  struct drm_plane_state *new_plane_state,
10816                                  bool enable,
10817                                  bool *lock_and_validation_needed)
10818 {
10820         struct dm_atomic_state *dm_state = NULL;
10821         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10822         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10823         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10824         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10825         struct amdgpu_crtc *new_acrtc;
10826         bool needs_reset;
10827         int ret = 0;
10828
10830         new_plane_crtc = new_plane_state->crtc;
10831         old_plane_crtc = old_plane_state->crtc;
10832         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10833         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10834
10835         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10836                 if (!enable || !new_plane_crtc ||
10837                         drm_atomic_plane_disabling(plane->state, new_plane_state))
10838                         return 0;
10839
10840                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10841
10842                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10843                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10844                         return -EINVAL;
10845                 }
10846
10847                 if (new_plane_state->fb) {
10848                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10849                                                  new_plane_state->fb);
10850                         if (ret)
10851                                 return ret;
10852                 }
10853
10854                 return 0;
10855         }
10856
10857         needs_reset = should_reset_plane(state, plane, old_plane_state,
10858                                          new_plane_state);
10859
10860         /* Remove any changed/removed planes */
10861         if (!enable) {
10862                 if (!needs_reset)
10863                         return 0;
10864
10865                 if (!old_plane_crtc)
10866                         return 0;
10867
10868                 old_crtc_state = drm_atomic_get_old_crtc_state(
10869                                 state, old_plane_crtc);
10870                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10871
10872                 if (!dm_old_crtc_state->stream)
10873                         return 0;
10874
10875                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10876                                 plane->base.id, old_plane_crtc->base.id);
10877
10878                 ret = dm_atomic_get_state(state, &dm_state);
10879                 if (ret)
10880                         return ret;
10881
10882                 if (!dc_remove_plane_from_context(
10883                                 dc,
10884                                 dm_old_crtc_state->stream,
10885                                 dm_old_plane_state->dc_state,
10886                                 dm_state->context)) {
10888                         return -EINVAL;
10889                 }
10891
10892                 dc_plane_state_release(dm_old_plane_state->dc_state);
10893                 dm_new_plane_state->dc_state = NULL;
10894
10895                 *lock_and_validation_needed = true;
10896
10897         } else { /* Add new planes */
10898                 struct dc_plane_state *dc_new_plane_state;
10899
10900                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10901                         return 0;
10902
10903                 if (!new_plane_crtc)
10904                         return 0;
10905
10906                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10907                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10908
10909                 if (!dm_new_crtc_state->stream)
10910                         return 0;
10911
10912                 if (!needs_reset)
10913                         return 0;
10914
10915                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10916                 if (ret)
10917                         return ret;
10918
10919                 WARN_ON(dm_new_plane_state->dc_state);
10920
10921                 dc_new_plane_state = dc_create_plane_state(dc);
10922                 if (!dc_new_plane_state)
10923                         return -ENOMEM;
10924
10925                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10926                                  plane->base.id, new_plane_crtc->base.id);
10927
10928                 ret = fill_dc_plane_attributes(
10929                         drm_to_adev(new_plane_crtc->dev),
10930                         dc_new_plane_state,
10931                         new_plane_state,
10932                         new_crtc_state);
10933                 if (ret) {
10934                         dc_plane_state_release(dc_new_plane_state);
10935                         return ret;
10936                 }
10937
10938                 ret = dm_atomic_get_state(state, &dm_state);
10939                 if (ret) {
10940                         dc_plane_state_release(dc_new_plane_state);
10941                         return ret;
10942                 }
10943
10944                 /*
10945                  * Any atomic check errors that occur after this will
10946                  * not need a release. The plane state will be attached
10947                  * to the stream, and therefore part of the atomic
10948                  * state. It'll be released when the atomic state is
10949                  * cleaned.
10950                  */
10951                 if (!dc_add_plane_to_context(
10952                                 dc,
10953                                 dm_new_crtc_state->stream,
10954                                 dc_new_plane_state,
10955                                 dm_state->context)) {
10957                         dc_plane_state_release(dc_new_plane_state);
10958                         return -EINVAL;
10959                 }
10960
10961                 dm_new_plane_state->dc_state = dc_new_plane_state;
10962
10963                 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10964
10965                 /* Tell DC to do a full surface update every time there
10966                  * is a plane change. Inefficient, but works for now.
10967                  */
10968                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10969
10970                 *lock_and_validation_needed = true;
10971         }
10972
10974         return ret;
10975 }
10976
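/*
 * Return the plane's source size in whole pixels, swapping width and height
 * for 90/270 degree rotations. drm_plane_state src coordinates are 16.16
 * fixed point, hence the shift by 16.
 */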
10977 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10978                                        int *src_w, int *src_h)
10979 {
10980         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10981         case DRM_MODE_ROTATE_90:
10982         case DRM_MODE_ROTATE_270:
10983                 *src_w = plane_state->src_h >> 16;
10984                 *src_h = plane_state->src_w >> 16;
10985                 break;
10986         case DRM_MODE_ROTATE_0:
10987         case DRM_MODE_ROTATE_180:
10988         default:
10989                 *src_w = plane_state->src_w >> 16;
10990                 *src_h = plane_state->src_h >> 16;
10991                 break;
10992         }
10993 }
10994
10995 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10996                                 struct drm_crtc *crtc,
10997                                 struct drm_crtc_state *new_crtc_state)
10998 {
10999         struct drm_plane *cursor = crtc->cursor, *underlying;
11000         struct drm_plane_state *new_cursor_state, *new_underlying_state;
11001         int i;
11002         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
11003         int cursor_src_w, cursor_src_h;
11004         int underlying_src_w, underlying_src_h;
11005
11006         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
11007          * cursor per pipe but it's going to inherit the scaling and
11008          * positioning from the underlying pipe. Check that the cursor plane's
11009          * scaling matches the underlying planes'. */
11010
11011         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
11012         if (!new_cursor_state || !new_cursor_state->fb)
11013                 return 0;
11015
11016         dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
11017         cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
11018         cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
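        /*
         * Scales are in per-mille: e.g. a 64x64 cursor FB shown in a 64x64
         * CRTC rectangle gives 64 * 1000 / 64 = 1000, i.e. 1:1. The
         * underlying plane must scale by the same ratio, or the cursor
         * would be rendered at the wrong effective size.
         */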
11019
11020         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
11021                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
11022                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
11023                         continue;
11024
11025                 /* Ignore disabled planes */
11026                 if (!new_underlying_state->fb)
11027                         continue;
11028
11029                 dm_get_oriented_plane_size(new_underlying_state,
11030                                            &underlying_src_w, &underlying_src_h);
11031                 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
11032                 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
11033
11034                 if (cursor_scale_w != underlying_scale_w ||
11035                     cursor_scale_h != underlying_scale_h) {
11036                         drm_dbg_atomic(crtc->dev,
11037                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
11038                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
11039                         return -EINVAL;
11040                 }
11041
11042                 /* If this plane covers the whole CRTC, no need to check planes underneath */
11043                 if (new_underlying_state->crtc_x <= 0 &&
11044                     new_underlying_state->crtc_y <= 0 &&
11045                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
11046                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
11047                         break;
11048         }
11049
11050         return 0;
11051 }
11052
11053 #if defined(CONFIG_DRM_AMD_DC_DCN)
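/*
 * If a modeset touches a CRTC driving an MST connector, pull every other
 * CRTC sharing that MST topology into the atomic state: recomputing DSC
 * for one stream can change the bandwidth available to its siblings, so
 * they may need to be revalidated as well.
 */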
11054 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
11055 {
11056         struct drm_connector *connector;
11057         struct drm_connector_state *conn_state, *old_conn_state;
11058         struct amdgpu_dm_connector *aconnector = NULL;
11059         int i;

11060         for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
11061                 if (!conn_state->crtc)
11062                         conn_state = old_conn_state;
11063
11064                 if (conn_state->crtc != crtc)
11065                         continue;
11066
11067                 aconnector = to_amdgpu_dm_connector(connector);
11068                 if (!aconnector->port || !aconnector->mst_port)
11069                         aconnector = NULL;
11070                 else
11071                         break;
11072         }
11073
11074         if (!aconnector)
11075                 return 0;
11076
11077         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
11078 }
11079 #endif
11080
11081 /**
11082  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDGPU DM.
11083  * @dev: The DRM device
11084  * @state: The atomic state to commit
11085  *
11086  * Validate that the given atomic state is programmable by DC into hardware.
11087  * This involves constructing a &struct dc_state reflecting the new hardware
11088  * state we wish to commit, then querying DC to see if it is programmable. It's
11089  * important not to modify the existing DC state. Otherwise, atomic_check
11090  * may unexpectedly commit hardware changes.
11091  *
11092  * When validating the DC state, it's important that the right locks are
11093  * acquired. For a full update, which removes/adds/updates streams on one
11094  * CRTC while flipping on another, acquiring the global lock guarantees
11095  * that any such full-update commit will wait for completion of any
11096  * outstanding flips using DRM's synchronization events.
11097  *
11098  * Note that DM adds the affected connectors for all CRTCs in state, even
11099  * when that might not seem necessary. This is because DC stream creation
11100  * requires the DC sink, which is tied to the DRM connector state. Cleaning
11101  * this up should be possible, but it is non-trivial (a possible TODO item).
11102  *
11103  * Return: 0 on success, or a negative error code if validation failed.
11104  */
11105 static int amdgpu_dm_atomic_check(struct drm_device *dev,
11106                                   struct drm_atomic_state *state)
11107 {
11108         struct amdgpu_device *adev = drm_to_adev(dev);
11109         struct dm_atomic_state *dm_state = NULL;
11110         struct dc *dc = adev->dm.dc;
11111         struct drm_connector *connector;
11112         struct drm_connector_state *old_con_state, *new_con_state;
11113         struct drm_crtc *crtc;
11114         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
11115         struct drm_plane *plane;
11116         struct drm_plane_state *old_plane_state, *new_plane_state;
11117         enum dc_status status;
11118         int ret, i;
11119         bool lock_and_validation_needed = false;
11120         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
11121 #if defined(CONFIG_DRM_AMD_DC_DCN)
11122         struct dsc_mst_fairness_vars vars[MAX_PIPES];
11123         struct drm_dp_mst_topology_state *mst_state;
11124         struct drm_dp_mst_topology_mgr *mgr;
11125 #endif
11126
11127         trace_amdgpu_dm_atomic_check_begin(state);
11128
11129         ret = drm_atomic_helper_check_modeset(dev, state);
11130         if (ret) {
11131                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
11132                 goto fail;
11133         }
11134
11135         /* Check connector changes */
11136         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11137                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11138                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11139
11140                 /* Skip connectors that are disabled or already part of a modeset. */
11141                 if (!old_con_state->crtc && !new_con_state->crtc)
11142                         continue;
11143
11144                 if (!new_con_state->crtc)
11145                         continue;
11146
11147                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
11148                 if (IS_ERR(new_crtc_state)) {
11149                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
11150                         ret = PTR_ERR(new_crtc_state);
11151                         goto fail;
11152                 }
11153
11154                 if (dm_old_con_state->abm_level !=
11155                     dm_new_con_state->abm_level)
11156                         new_crtc_state->connectors_changed = true;
11157         }
11158
11159 #if defined(CONFIG_DRM_AMD_DC_DCN)
11160         if (dc_resource_is_dsc_encoding_supported(dc)) {
11161                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11162                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11163                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
11164                                 if (ret) {
11165                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11166                                         goto fail;
11167                                 }
11168                         }
11169                 }
11170                 if (!pre_validate_dsc(state, &dm_state, vars)) {
11171                         ret = -EINVAL;
11172                         goto fail;
11173                 }
11174         }
11175 #endif
11176         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11177                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11178
11179                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11180                     !new_crtc_state->color_mgmt_changed &&
11181                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11182                     !dm_old_crtc_state->dsc_force_changed)
11183                         continue;
11184
11185                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11186                 if (ret) {
11187                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11188                         goto fail;
11189                 }
11190
11191                 if (!new_crtc_state->enable)
11192                         continue;
11193
11194                 ret = drm_atomic_add_affected_connectors(state, crtc);
11195                 if (ret) {
11196                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11197                         goto fail;
11198                 }
11199
11200                 ret = drm_atomic_add_affected_planes(state, crtc);
11201                 if (ret) {
11202                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11203                         goto fail;
11204                 }
11205
11206                 if (dm_old_crtc_state->dsc_force_changed)
11207                         new_crtc_state->mode_changed = true;
11208         }
11209
11210         /*
11211          * Add all primary and overlay planes on the CRTC to the state
11212          * whenever a plane is enabled to maintain correct z-ordering
11213          * and to enable fast surface updates.
11214          */
11215         drm_for_each_crtc(crtc, dev) {
11216                 bool modified = false;
11217
11218                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11219                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11220                                 continue;
11221
11222                         if (new_plane_state->crtc == crtc ||
11223                             old_plane_state->crtc == crtc) {
11224                                 modified = true;
11225                                 break;
11226                         }
11227                 }
11228
11229                 if (!modified)
11230                         continue;
11231
11232                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11233                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11234                                 continue;
11235
11236                         new_plane_state =
11237                                 drm_atomic_get_plane_state(state, plane);
11238
11239                         if (IS_ERR(new_plane_state)) {
11240                                 ret = PTR_ERR(new_plane_state);
11241                                 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11242                                 goto fail;
11243                         }
11244                 }
11245         }
11246
11247         /* Remove existing planes if they are modified */
11248         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11249                 ret = dm_update_plane_state(dc, state, plane,
11250                                             old_plane_state,
11251                                             new_plane_state,
11252                                             false,
11253                                             &lock_and_validation_needed);
11254                 if (ret) {
11255                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11256                         goto fail;
11257                 }
11258         }
11259
11260         /* Disable all crtcs which require disable */
11261         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11262                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11263                                            old_crtc_state,
11264                                            new_crtc_state,
11265                                            false,
11266                                            &lock_and_validation_needed);
11267                 if (ret) {
11268                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11269                         goto fail;
11270                 }
11271         }
11272
11273         /* Enable all crtcs which require enable */
11274         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11275                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11276                                            old_crtc_state,
11277                                            new_crtc_state,
11278                                            true,
11279                                            &lock_and_validation_needed);
11280                 if (ret) {
11281                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11282                         goto fail;
11283                 }
11284         }
11285
11286         /* Add new/modified planes */
11287         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11288                 ret = dm_update_plane_state(dc, state, plane,
11289                                             old_plane_state,
11290                                             new_plane_state,
11291                                             true,
11292                                             &lock_and_validation_needed);
11293                 if (ret) {
11294                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11295                         goto fail;
11296                 }
11297         }
11298
11299         /* Run this here since we want to validate the streams we created */
11300         ret = drm_atomic_helper_check_planes(dev, state);
11301         if (ret) {
11302                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11303                 goto fail;
11304         }
11305
11306         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11307                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11308                 if (dm_new_crtc_state->mpo_requested)
11309                         DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11310         }
11311
11312         /* Check cursor planes scaling */
11313         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11314                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11315                 if (ret) {
11316                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11317                         goto fail;
11318                 }
11319         }
11320
11321         if (state->legacy_cursor_update) {
11322                 /*
11323                  * This is a fast cursor update coming from the plane update
11324                  * helper, check if it can be done asynchronously for better
11325                  * performance.
11326                  */
11327                 state->async_update =
11328                         !drm_atomic_helper_async_check(dev, state);
11329
11330                 /*
11331                  * Skip the remaining global validation if this is an async
11332                  * update. Cursor updates can be done without affecting
11333                  * state or bandwidth calcs and this avoids the performance
11334                  * penalty of locking the private state object and
11335                  * allocating a new dc_state.
11336                  */
11337                 if (state->async_update)
11338                         return 0;
11339         }
11340
11341         /* Check scaling and underscan changes */
11342         /* TODO: Removed scaling changes validation due to inability to commit
11343          * new stream into context w/o causing full reset. Need to
11344          * decide how to handle.
11345          */
11346         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11347                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11348                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11349                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11350
11351                 /* Skip any modesets/resets */
11352                 if (!acrtc || drm_atomic_crtc_needs_modeset(
11353                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11354                         continue;
11355
11356                 /* Skip anything that is not a scaling or underscan change */
11357                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11358                         continue;
11359
11360                 lock_and_validation_needed = true;
11361         }
11362
11363 #if defined(CONFIG_DRM_AMD_DC_DCN)
11364         /* set the slot info for each mst_state based on the link encoding format */
11365         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11366                 struct amdgpu_dm_connector *aconnector;
11367                 struct drm_connector *connector;
11368                 struct drm_connector_list_iter iter;
11369                 u8 link_coding_cap;
11370
11371                 if (!mgr->mst_state)
11372                         continue;
11373
11374                 drm_connector_list_iter_begin(dev, &iter);
11375                 drm_for_each_connector_iter(connector, &iter) {
11376                         int id = connector->index;
11377
11378                         if (id == mst_state->mgr->conn_base_id) {
11379                                 aconnector = to_amdgpu_dm_connector(connector);
11380                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11381                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11382
11383                                 break;
11384                         }
11385                 }
11386                 drm_connector_list_iter_end(&iter);
11388         }
11389 #endif
11390         /*
11391          * Streams and planes are reset when there are changes that affect
11392          * bandwidth. Anything that affects bandwidth needs to go through
11393          * DC global validation to ensure that the configuration can be applied
11394          * to hardware.
11395          *
11396          * We have to currently stall out here in atomic_check for outstanding
11397          * commits to finish in this case because our IRQ handlers reference
11398          * DRM state directly - we can end up disabling interrupts too early
11399          * if we don't.
11400          *
11401          * TODO: Remove this stall and drop DM state private objects.
11402          */
11403         if (lock_and_validation_needed) {
11404                 ret = dm_atomic_get_state(state, &dm_state);
11405                 if (ret) {
11406                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11407                         goto fail;
11408                 }
11409
11410                 ret = do_aquire_global_lock(dev, state);
11411                 if (ret) {
11412                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11413                         goto fail;
11414                 }
11415
11416 #if defined(CONFIG_DRM_AMD_DC_DCN)
11417                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11418                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
11419                         ret = -EINVAL;
11420                         goto fail;
11421                 }
11422
11423                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11424                 if (ret) {
11425                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11426                         goto fail;
11427                 }
11428 #endif
11429
11430                 /*
11431                  * Perform validation of MST topology in the state:
11432                  * We need to perform MST atomic check before calling
11433                  * dc_validate_global_state(), or there is a chance
11434                  * to get stuck in an infinite loop and hang eventually.
11435                  */
11436                 ret = drm_dp_mst_atomic_check(state);
11437                 if (ret) {
11438                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11439                         goto fail;
11440                 }
11441                 status = dc_validate_global_state(dc, dm_state->context, true);
11442                 if (status != DC_OK) {
11443                         DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
11444                                          dc_status_to_str(status), status);
11445                         ret = -EINVAL;
11446                         goto fail;
11447                 }
11448         } else {
11449                 /*
11450                  * The commit is a fast update. Fast updates shouldn't change
11451                  * the DC context, affect global validation, and can have their
11452                  * commit work done in parallel with other commits not touching
11453                  * the same resource. If we have a new DC context as part of
11454                  * the DM atomic state from validation we need to free it and
11455                  * retain the existing one instead.
11456                  *
11457                  * Furthermore, since the DM atomic state only contains the DC
11458                  * context and can safely be annulled, we can free the state
11459                  * and clear the associated private object now to free
11460                  * some memory and avoid a possible use-after-free later.
11461                  */
11462
11463                 for (i = 0; i < state->num_private_objs; i++) {
11464                         struct drm_private_obj *obj = state->private_objs[i].ptr;
11465
11466                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
11467                                 int j = state->num_private_objs - 1;
11468
11469                                 dm_atomic_destroy_state(obj,
11470                                                 state->private_objs[i].state);
11471
11472                                 /* If i is not at the end of the array then the
11473                                  * last element needs to be moved to where i was
11474                                  * before the array can safely be truncated.
11475                                  */
11476                                 if (i != j)
11477                                         state->private_objs[i] =
11478                                                 state->private_objs[j];
11479
11480                                 state->private_objs[j].ptr = NULL;
11481                                 state->private_objs[j].state = NULL;
11482                                 state->private_objs[j].old_state = NULL;
11483                                 state->private_objs[j].new_state = NULL;
11484
11485                                 state->num_private_objs = j;
11486                                 break;
11487                         }
11488                 }
11489         }
11490
11491         /* Store the overall update type for use later in atomic check. */
11492         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11493                 struct dm_crtc_state *dm_new_crtc_state =
11494                         to_dm_crtc_state(new_crtc_state);
11495
11496                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11497                                                          UPDATE_TYPE_FULL :
11498                                                          UPDATE_TYPE_FAST;
11499         }
11500
11501         /* Must be success */
11502         WARN_ON(ret);
11503
11504         trace_amdgpu_dm_atomic_check_finish(state, ret);
11505
11506         return ret;
11507
11508 fail:
11509         if (ret == -EDEADLK)
11510                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11511         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11512                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11513         else
11514                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11515
11516         trace_amdgpu_dm_atomic_check_finish(state, ret);
11517
11518         return ret;
11519 }
11520
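/*
 * Variable refresh over DP requires the sink to ignore the MSA timing
 * parameters; sinks advertise that via the MSA_TIMING_PAR_IGNORED bit of
 * the DOWN_STREAM_PORT_COUNT DPCD register, which is what gets probed here.
 */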
11521 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11522                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
11523 {
11524         uint8_t dpcd_data;
11525         bool capable = false;
11526
11527         if (amdgpu_dm_connector->dc_link &&
11528                 dm_helpers_dp_read_dpcd(
11529                                 NULL,
11530                                 amdgpu_dm_connector->dc_link,
11531                                 DP_DOWN_STREAM_PORT_COUNT,
11532                                 &dpcd_data,
11533                                 sizeof(dpcd_data))) {
11534                 capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
11535         }
11536
11537         return capable;
11538 }
11539
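/*
 * Hand one chunk of a CEA extension block to the DMUB firmware parser. The
 * command carries the chunk's offset into the block and the block's total
 * length; the firmware replies either with an ack for the chunk or, once
 * it has consumed the whole block, with the parsed AMD VSDB if one was
 * found.
 */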
11540 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11541                 unsigned int offset,
11542                 unsigned int total_length,
11543                 uint8_t *data,
11544                 unsigned int length,
11545                 struct amdgpu_hdmi_vsdb_info *vsdb)
11546 {
11547         bool res;
11548         union dmub_rb_cmd cmd;
11549         struct dmub_cmd_send_edid_cea *input;
11550         struct dmub_cmd_edid_cea_output *output;
11551
11552         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11553                 return false;
11554
11555         memset(&cmd, 0, sizeof(cmd));
11556
11557         input = &cmd.edid_cea.data.input;
11558
11559         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11560         cmd.edid_cea.header.sub_type = 0;
11561         cmd.edid_cea.header.payload_bytes =
11562                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11563         input->offset = offset;
11564         input->length = length;
11565         input->cea_total_length = total_length;
11566         memcpy(input->payload, data, length);
11567
11568         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11569         if (!res) {
11570                 DRM_ERROR("EDID CEA parser failed\n");
11571                 return false;
11572         }
11573
11574         output = &cmd.edid_cea.data.output;
11575
11576         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11577                 if (!output->ack.success) {
11578                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
11579                                         output->ack.offset);
11580                 }
11581         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11582                 if (!output->amd_vsdb.vsdb_found)
11583                         return false;
11584
11585                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11586                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11587                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11588                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11589         } else {
11590                 DRM_WARN("Unknown EDID CEA parser results\n");
11591                 return false;
11592         }
11593
11594         return true;
11595 }
11596
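/*
 * Stream a CEA extension block to the DMCU firmware 8 bytes at a time,
 * waiting for an ack after each chunk; after the last chunk, retrieve the
 * AMD VSDB (if one was found) and fill in the FreeSync capabilities.
 */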
11597 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11598                 uint8_t *edid_ext, int len,
11599                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11600 {
11601         int i;
11602
11603         /* send extension block to DMCU for parsing */
11604         for (i = 0; i < len; i += 8) {
11605                 bool res;
11606                 int offset;
11607
11608                 /* send 8 bytes at a time */
11609                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11610                         return false;
11611
11612                 if (i + 8 == len) {
11613                         /* entire EDID block sent; expect the parse result */
11614                         int version, min_rate, max_rate;
11615
11616                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11617                         if (res) {
11618                                 /* amd vsdb found */
11619                                 vsdb_info->freesync_supported = 1;
11620                                 vsdb_info->amd_vsdb_version = version;
11621                                 vsdb_info->min_refresh_rate_hz = min_rate;
11622                                 vsdb_info->max_refresh_rate_hz = max_rate;
11623                                 return true;
11624                         }
11625                         /* not amd vsdb */
11626                         return false;
11627                 }
11628
11629                 /* check for ack */
11630                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11631                 if (!res)
11632                         return false;
11633         }
11634
11635         return false;
11636 }
11637
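/*
 * DMUB variant of the CEA parser: dm_edid_parser_send_cea() fills in
 * vsdb_info as a side effect, so only the final freesync_supported flag
 * needs to be checked once the whole block has been sent.
 */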
11638 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11639                 uint8_t *edid_ext, int len,
11640                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11641 {
11642         int i;
11643
11644         /* send extension block to DMUB for parsing */
11645         for (i = 0; i < len; i += 8) {
11646                 /* send 8 bytes at a time */
11647                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11648                         return false;
11649         }
11650
11651         return vsdb_info->freesync_supported;
11652 }
11653
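/* Dispatch CEA extension parsing to DMUB when available, else to DMCU. */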
11654 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11655                 uint8_t *edid_ext, int len,
11656                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11657 {
11658         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11659
11660         if (adev->dm.dmub_srv)
11661                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11662         else
11663                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11664 }
11665
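/*
 * Look for a CEA extension block in the EDID and have the firmware scan it
 * for an AMD vendor-specific data block. Returns the index of the extension
 * block on success, or -ENODEV if no usable VSDB was found.
 */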
11666 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11667                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11668 {
11669         uint8_t *edid_ext = NULL;
11670         int i;
11671         bool valid_vsdb_found = false;
11672
11673         /*----- drm_find_cea_extension() -----*/
11674         /* No EDID or EDID extensions */
11675         if (edid == NULL || edid->extensions == 0)
11676                 return -ENODEV;
11677
11678         /* Find CEA extension */
11679         for (i = 0; i < edid->extensions; i++) {
11680                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11681                 if (edid_ext[0] == CEA_EXT)
11682                         break;
11683         }
11684
11685         if (i == edid->extensions)
11686                 return -ENODEV;
11687
11688         /*----- cea_db_offsets() -----*/
11689         if (edid_ext[0] != CEA_EXT)
11690                 return -ENODEV;
11691
11692         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11693
11694         return valid_vsdb_found ? i : -ENODEV;
11695 }
11696
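/*
 * Update the connector's FreeSync/VRR capabilities from its EDID: for
 * DP/eDP sinks, scan the detailed timing descriptors for a monitor range;
 * for HDMI sinks, look for the AMD vendor-specific data block. The result
 * is cached in the connector state and exposed through the DRM
 * "vrr_capable" property.
 */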
11697 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11698                                         struct edid *edid)
11699 {
11700         int i = 0;
11701         struct detailed_timing *timing;
11702         struct detailed_non_pixel *data;
11703         struct detailed_data_monitor_range *range;
11704         struct amdgpu_dm_connector *amdgpu_dm_connector =
11705                         to_amdgpu_dm_connector(connector);
11706         struct dm_connector_state *dm_con_state = NULL;
11707         struct dc_sink *sink;
11708
11709         struct drm_device *dev = connector->dev;
11710         struct amdgpu_device *adev = drm_to_adev(dev);
11711         bool freesync_capable = false;
11712         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11713
11714         if (!connector->state) {
11715                 DRM_ERROR("%s - Connector has no state\n", __func__);
11716                 goto update;
11717         }
11718
11719         sink = amdgpu_dm_connector->dc_sink ?
11720                 amdgpu_dm_connector->dc_sink :
11721                 amdgpu_dm_connector->dc_em_sink;
11722
11723         if (!edid || !sink) {
11724                 dm_con_state = to_dm_connector_state(connector->state);
11725
11726                 amdgpu_dm_connector->min_vfreq = 0;
11727                 amdgpu_dm_connector->max_vfreq = 0;
11728                 amdgpu_dm_connector->pixel_clock_mhz = 0;
11729                 connector->display_info.monitor_range.min_vfreq = 0;
11730                 connector->display_info.monitor_range.max_vfreq = 0;
11731                 freesync_capable = false;
11732
11733                 goto update;
11734         }
11735
11736         dm_con_state = to_dm_connector_state(connector->state);
11737
11738         if (!adev->dm.freesync_module)
11739                 goto update;
11740
11742         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
11743             sink->sink_signal == SIGNAL_TYPE_EDP) {
11744                 bool edid_check_required = false;
11745
11746                 if (edid) {
11747                         edid_check_required = is_dp_capable_without_timing_msa(
11748                                                 adev->dm.dc,
11749                                                 amdgpu_dm_connector);
11750                 }
11751
11752                 if (edid_check_required && (edid->version > 1 ||
11753                    (edid->version == 1 && edid->revision > 1))) {
11754                         for (i = 0; i < 4; i++) {
11755
11756                                 timing  = &edid->detailed_timings[i];
11757                                 data    = &timing->data.other_data;
11758                                 range   = &data->data.range;
11759                                 /*
11760                                  * Check if monitor has continuous frequency mode
11761                                  */
11762                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11763                                         continue;
11764                                 /*
11765                                  * Check for flag range limits only. If flag == 1 then
11766                                  * no additional timing information provided.
11767                                  * Default GTF, GTF Secondary curve and CVT are not
11768                                  * supported
11769                                  */
11770                                 if (range->flags != 1)
11771                                         continue;
11772
11773                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11774                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11775                                 amdgpu_dm_connector->pixel_clock_mhz =
11776                                         range->pixel_clock_mhz * 10;
11777
11778                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11779                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11780
11781                                 break;
11782                         }
11783
11784                         if (amdgpu_dm_connector->max_vfreq -
11785                             amdgpu_dm_connector->min_vfreq > 10)
11786                                 freesync_capable = true;
11789                 }
11790         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11791                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11792                 if (i >= 0 && vsdb_info.freesync_supported) {
11793                         timing  = &edid->detailed_timings[i];
11794                         data    = &timing->data.other_data;
11795
11796                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11797                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11798                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11799                                 freesync_capable = true;
11800
11801                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11802                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11803                 }
11804         }
11805
11806 update:
11807         if (dm_con_state)
11808                 dm_con_state->freesync_capable = freesync_capable;
11809
11810         if (connector->vrr_capable_property)
11811                 drm_connector_set_vrr_capable_property(connector,
11812                                                        freesync_capable);
11813 }
11814
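/*
 * Propagate the force_timing_sync debug setting to every active stream and
 * ask DC to (re)synchronize the CRTC timings.
 */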
11815 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11816 {
11817         struct amdgpu_device *adev = drm_to_adev(dev);
11818         struct dc *dc = adev->dm.dc;
11819         int i;
11820
11821         mutex_lock(&adev->dm.dc_lock);
11822         if (dc->current_state) {
11823                 for (i = 0; i < dc->current_state->stream_count; ++i)
11824                         dc->current_state->streams[i]
11825                                 ->triggered_crtc_reset.enabled =
11826                                 adev->dm.force_timing_sync;
11827
11828                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11829                 dc_trigger_sync(dc, dc->current_state);
11830         }
11831         mutex_unlock(&adev->dm.dc_lock);
11832 }
11833
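/* Register write helper used by DC; traces every write for debugging. */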
11834 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11835                        uint32_t value, const char *func_name)
11836 {
11837 #ifdef DM_CHECK_ADDR_0
11838         if (address == 0) {
11839                 DC_ERR("invalid register write; address = 0\n");
11840                 return;
11841         }
11842 #endif
11843         cgs_write_register(ctx->cgs_device, address, value);
11844         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11845 }
11846
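/*
 * Register read helper used by DC. Reads are not allowed while DMUB
 * register offloading is gathering a command burst (a burst may only
 * contain writes), so bail out with an assert in that case.
 */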
11847 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11848                           const char *func_name)
11849 {
11850         uint32_t value;
11851 #ifdef DM_CHECK_ADDR_0
11852         if (address == 0) {
11853                 DC_ERR("invalid register read; address = 0\n");
11854                 return 0;
11855         }
11856 #endif
11857
11858         if (ctx->dmub_srv &&
11859             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11860             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11861                 ASSERT(false);
11862                 return 0;
11863         }
11864
11865         value = cgs_read_register(ctx->cgs_device, address);
11866
11867         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11868
11869         return value;
11870 }
11871
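/*
 * Translate the outcome of a DMUB AUX or SET_CONFIG request into the
 * caller-visible return value and operation_result code. For a successful
 * AUX transfer the return value is the reply length; for SET_CONFIG it is
 * 0. Any failure is reported through *operation_result with a -1 return.
 */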
11872 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11873                                                 struct dc_context *ctx,
11874                                                 uint8_t status_type,
11875                                                 uint32_t *operation_result)
11876 {
11877         struct amdgpu_device *adev = ctx->driver_context;
11878         int return_status = -1;
11879         struct dmub_notification *p_notify = adev->dm.dmub_notify;
11880
11881         if (is_cmd_aux) {
11882                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11883                         return_status = p_notify->aux_reply.length;
11884                         *operation_result = p_notify->result;
11885                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11886                         *operation_result = AUX_RET_ERROR_TIMEOUT;
11887                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11888                         *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11889                 } else {
11890                         *operation_result = AUX_RET_ERROR_UNKNOWN;
11891                 }
11892         } else {
11893                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11894                         return_status = 0;
11895                         *operation_result = p_notify->sc_status;
11896                 } else {
11897                         *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11898                 }
11899         }
11900
11901         return return_status;
11902 }
11903
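/*
 * Issue an AUX transfer or SET_CONFIG request through DMUB and wait (up to
 * 10 seconds) for the firmware notification before collecting the result,
 * turning the asynchronous DMUB interface into a synchronous call for the
 * caller.
 */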
11904 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11905         unsigned int link_index, void *cmd_payload, void *operation_result)
11906 {
11907         struct amdgpu_device *adev = ctx->driver_context;
11908         int ret = 0;
11909
11910         if (is_cmd_aux) {
11911                 dc_process_dmub_aux_transfer_async(ctx->dc,
11912                         link_index, (struct aux_payload *)cmd_payload);
11913         } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11914                                         (struct set_config_cmd_payload *)cmd_payload,
11915                                         adev->dm.dmub_notify)) {
11916                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11917                                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11918                                         (uint32_t *)operation_result);
11919         }
11920
11921         ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11922         if (ret == 0) {
11923                 DRM_ERROR("wait_for_completion_timeout timed out!\n");
11924                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11925                                 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11926                                 (uint32_t *)operation_result);
11927         }
11928
11929         if (is_cmd_aux) {
11930                 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11931                         struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11932
11933                         payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11934                         if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11935                             payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11936                                 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11937                                        adev->dm.dmub_notify->aux_reply.length);
11938                         }
11939                 }
11940         }
11941
11942         return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11943                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11944                         (uint32_t *)operation_result);
11945 }
11946
11947 /*
11948  * Check whether seamless boot is supported.
11949  *
11950  * So far we only support seamless boot on CHIP_VANGOGH.
11951  * If everything goes well, we may consider expanding
11952  * seamless boot to other ASICs.
11953  */
11954 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11955 {
11956         switch (adev->asic_type) {
11957         case CHIP_VANGOGH:
11958                 if (!adev->mman.keep_stolen_vga_memory)
11959                         return true;
11960                 break;
11961         default:
11962                 break;
11963         }
11964
11965         return false;
11966 }