/* drivers/gpu/drm/amd/display/dc/core/dc.c */
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "amdgpu.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"
#include "dc_state.h"
#include "dc_state_priv.h"

#include "gpio_service_interface.h"
#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"
#include "hubp.h"

#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "link.h"
#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "dce/dmub_psr.h"

#include "dce/dmub_hw_lock_mgr.h"

#include "dc_trace.h"

#include "hw_sequencer_private.h"

#include "dml2/dml2_internal_types.h"

#include "dce/dmub_outbox.h"

#define CTX \
        dc->ctx

#define DC_LOGGER \
        dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";

/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct.  One per driver.  Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints).  Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display.  Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug.  A dc_link can have a local sink
 * (the display directly attached).  It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver.  Represents the hw blocks not in the
 * main pipeline.  Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed.  There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display.  Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer.  Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool.  Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context.  Represents the
 * internal hardware pipeline components.  Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */
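
/* Illustrative sketch only (not part of this file): how a DM layer might
 * walk the object hierarchy described above. Error handling is omitted, and
 * "init_data" and "detect_streams_for_sink" are hypothetical names used for
 * the example.
 *
 *      struct dc *dc = dc_create(&init_data);
 *      int i;
 *
 *      for (i = 0; i < dc->link_count; i++) {
 *              struct dc_link *link = dc->links[i];
 *
 *              // one dc_stream_state per dc_sink; one or more
 *              // dc_plane_state per stream (Multi-Plane Overlay case)
 *              if (link->local_sink)
 *                      detect_streams_for_sink(link->local_sink);
 *      }
 */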

/* Private functions */

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
        if (new > *original)
                *original = new;
}

static void destroy_links(struct dc *dc)
{
        uint32_t i;

        for (i = 0; i < dc->link_count; i++) {
                if (NULL != dc->links[i])
                        dc->link_srv->destroy_link(&dc->links[i]);
        }
}

static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
        int i;
        uint32_t count = 0;

        for (i = 0; i < num_links; i++) {
                if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
                                links[i]->is_internal_display)
                        count++;
        }

        return count;
}

static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
        uint8_t i;
        uint8_t seamless_boot_stream_count = 0;

        for (i = 0; i < ctx->stream_count; i++)
                if (ctx->streams[i]->apply_seamless_boot_optimization)
                        seamless_boot_stream_count++;

        return seamless_boot_stream_count;
}

static bool create_links(
                struct dc *dc,
                uint32_t num_virtual_links)
{
        int i;
        int connectors_num;
        struct dc_bios *bios = dc->ctx->dc_bios;

        dc->link_count = 0;

        connectors_num = bios->funcs->get_connectors_number(bios);

        DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

        if (connectors_num > ENUM_ID_COUNT) {
                dm_error(
                        "DC: Number of connectors %d exceeds maximum of %d!\n",
                        connectors_num,
                        ENUM_ID_COUNT);
                return false;
        }

        dm_output_to_console(
                "DC: %s: connectors_num: physical:%d, virtual:%d\n",
                __func__,
                connectors_num,
                num_virtual_links);

        for (i = 0; i < connectors_num; i++) {
                struct link_init_data link_init_params = {0};
                struct dc_link *link;

                DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

                link_init_params.ctx = dc->ctx;
                /* next BIOS object table connector */
                link_init_params.connector_index = i;
                link_init_params.link_index = dc->link_count;
                link_init_params.dc = dc;
                link = dc->link_srv->create_link(&link_init_params);

                if (link) {
                        dc->links[dc->link_count] = link;
                        link->dc = dc;
                        ++dc->link_count;
                }
        }

        DC_LOG_DC("BIOS object table - end");

        /* Create a link for each usb4 dpia port */
        for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
                struct link_init_data link_init_params = {0};
                struct dc_link *link;

                link_init_params.ctx = dc->ctx;
                link_init_params.connector_index = i;
                link_init_params.link_index = dc->link_count;
                link_init_params.dc = dc;
                link_init_params.is_dpia_link = true;

                link = dc->link_srv->create_link(&link_init_params);
                if (link) {
                        dc->links[dc->link_count] = link;
                        link->dc = dc;
                        ++dc->link_count;
                }
        }

        for (i = 0; i < num_virtual_links; i++) {
                struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
                struct encoder_init_data enc_init = {0};

                if (link == NULL) {
                        BREAK_TO_DEBUGGER();
                        goto failed_alloc;
                }

                link->link_index = dc->link_count;
                dc->links[dc->link_count] = link;
                dc->link_count++;

                link->ctx = dc->ctx;
                link->dc = dc;
                link->connector_signal = SIGNAL_TYPE_VIRTUAL;
                link->link_id.type = OBJECT_TYPE_CONNECTOR;
                link->link_id.id = CONNECTOR_ID_VIRTUAL;
                link->link_id.enum_id = ENUM_ID_1;
                link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

                if (!link->link_enc) {
                        BREAK_TO_DEBUGGER();
                        goto failed_alloc;
                }

                link->link_status.dpcd_caps = &link->dpcd_caps;

                enc_init.ctx = dc->ctx;
                enc_init.channel = CHANNEL_ID_UNKNOWN;
                enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
                enc_init.transmitter = TRANSMITTER_UNKNOWN;
                enc_init.connector = link->link_id;
                enc_init.encoder.type = OBJECT_TYPE_ENCODER;
                enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
                enc_init.encoder.enum_id = ENUM_ID_1;
                virtual_link_encoder_construct(link->link_enc, &enc_init);
        }

        dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

        return true;

failed_alloc:
        return false;
}

/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 */
static bool create_link_encoders(struct dc *dc)
{
        bool res = true;
        unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
        unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
        int i;

        /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
         * link encoders and physical display endpoints and does not require
         * additional link encoder objects.
         */
        if (num_usb4_dpia == 0)
                return res;

        /* Create as many link encoder objects as the platform supports. DPIA
         * endpoints can be programmably mapped to any DIG.
         */
        if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
                for (i = 0; i < num_dig_link_enc; i++) {
                        struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

                        if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
                                link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
                                                (enum engine_id)(ENGINE_ID_DIGA + i));
                                if (link_enc) {
                                        dc->res_pool->link_encoders[i] = link_enc;
                                        dc->res_pool->dig_link_enc_count++;
                                } else {
                                        res = false;
                                }
                        }
                }
        }

        return res;
}

/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
{
        unsigned int num_usb4_dpia;
        unsigned int num_dig_link_enc;
        int i;

        if (!dc->res_pool)
                return;

        num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
        num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;

        /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
         * link encoders and physical display endpoints and does not require
         * additional link encoder objects.
         */
        if (num_usb4_dpia == 0)
                return;

        for (i = 0; i < num_dig_link_enc; i++) {
                struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

                if (link_enc) {
                        link_enc->funcs->destroy(&link_enc);
                        dc->res_pool->link_encoders[i] = NULL;
                        dc->res_pool->dig_link_enc_count--;
                }
        }
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
        return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
        kfree(*perf_trace);
        *perf_trace = NULL;
}

/**
 *  dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
 *  @dc:     dc reference
 *  @stream: Initial dc stream state
 *  @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 *  Looks up the pipe context of dc_stream_state and updates the
 *  vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
 *  Rate, which is a power-saving feature that targets reducing panel
 *  refresh rate while the screen is static.
 *
 *  Return: %true if the pipe context is found and adjusted;
 *          %false if the pipe context is not found.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_crtc_timing_adjust *adjust)
{
        int i;

        /*
         * Don't adjust DRR while there are bandwidth optimizations pending to
         * avoid conflicting with firmware updates.
         */
        if (dc->ctx->dce_version > DCE_VERSION_MAX)
                if (dc->optimized_required || dc->wm_optimized_required)
                        return false;

        dc_exit_ips_for_hw_access(dc);

        stream->adjust.v_total_max = adjust->v_total_max;
        stream->adjust.v_total_mid = adjust->v_total_mid;
        stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
        stream->adjust.v_total_min = adjust->v_total_min;

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.tg) {
                        dc->hwss.set_drr(&pipe,
                                        1,
                                        *adjust);

                        return true;
                }
        }
        return false;
}

/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * @dc: [in] dc reference
 * @stream: [in] Initial dc stream state
 * @refresh_rate: [out] last VTOTAL used by the DRR
 *
 * Return: %true if the pipe context is found and there is an associated
 *         timing_generator for the DC;
 *         %false if the pipe context is not found or there is no
 *         timing_generator for the DC.
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
                struct dc_stream_state *stream,
                uint32_t *refresh_rate)
{
        bool status = false;

        int i = 0;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.tg) {
                        /* Only execute if a function pointer has been defined for
                         * the DC version in question
                         */
                        if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
                                pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

                                status = true;

                                break;
                        }
                }
        }

        return status;
}
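
/* Illustrative usage sketch (hypothetical DM-side caller, not part of this
 * file): request a DRR range from the nominal rate down to half rate, then
 * read back the last VTOTAL the hardware actually used. Assumes "dc" and
 * "stream" are valid and the stream is already committed.
 *
 *      struct dc_crtc_timing_adjust adjust = stream->adjust;
 *      uint32_t last_vtotal = 0;
 *
 *      adjust.v_total_min = stream->timing.v_total;       // nominal rate
 *      adjust.v_total_max = 2 * stream->timing.v_total;   // down to half rate
 *      if (dc_stream_adjust_vmin_vmax(dc, stream, &adjust))
 *              dc_stream_get_last_used_drr_vtotal(dc, stream, &last_vtotal);
 */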

bool dc_stream_get_crtc_position(struct dc *dc,
                struct dc_stream_state **streams, int num_streams,
                unsigned int *v_pos, unsigned int *nom_v_pos)
{
        /* TODO: Support multiple streams */
        const struct dc_stream_state *stream = streams[0];
        int i;
        bool ret = false;
        struct crtc_position position;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe =
                                &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.stream_enc) {
                        dc->hwss.get_position(&pipe, 1, &position);

                        *v_pos = position.vertical_count;
                        *nom_v_pos = position.nominal_vcount;
                        ret = true;
                }
        }
        return ret;
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static inline void
dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
                struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
        union dmub_rb_cmd cmd = {0};

        cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
        cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;

        if (is_stop) {
                cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
                cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
        } else {
                cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
                cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
                cmd.secure_display.roi_info.x_start = rect->x;
                cmd.secure_display.roi_info.y_start = rect->y;
                cmd.secure_display.roi_info.x_end = rect->x + rect->width;
                cmd.secure_display.roi_info.y_end = rect->y + rect->height;
        }

        dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}

static inline void
dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
                struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
        if (is_stop)
                dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
        else
                dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);
}

bool
dc_stream_forward_crc_window(struct dc_stream_state *stream,
                struct rect *rect, bool is_stop)
{
        struct dmcu *dmcu;
        struct dc_dmub_srv *dmub_srv;
        struct otg_phy_mux mux_mapping;
        struct pipe_ctx *pipe;
        int i;
        struct dc *dc = stream->ctx->dc;

        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
                        break;
        }

        /* Stream not found */
        if (i == MAX_PIPES)
                return false;

        mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
        mux_mapping.otg_output_num = pipe->stream_res.tg->inst;

        dmcu = dc->res_pool->dmcu;
        dmub_srv = dc->ctx->dmub_srv;

        /* forward to dmub */
        if (dmub_srv)
                dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
        /* forward to dmcu */
        else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
                dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);
        else
                return false;

        return true;
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
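
/* Illustrative usage sketch (hypothetical secure-display caller, only
 * meaningful when CONFIG_DRM_AMD_SECURE_DISPLAY is set): forward a region of
 * interest for CRC calculation, then later stop the window update. Passing a
 * NULL rect on stop is an assumption of this sketch; the rect is not read in
 * the stop paths above.
 *
 *      struct rect roi = { .x = 0, .y = 0, .width = 256, .height = 256 };
 *
 *      dc_stream_forward_crc_window(stream, &roi, false);  // start/notify
 *      ...
 *      dc_stream_forward_crc_window(stream, NULL, true);   // stop
 */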

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @crc_window: CRC window (x/y start/end) information
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 *
 * Return: %false if the stream is not found or CRC capture is not supported;
 *         %true if the stream has been configured.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
                             struct crc_params *crc_window, bool enable, bool continuous)
{
        struct pipe_ctx *pipe;
        struct crc_params param;
        struct timing_generator *tg;

        pipe = resource_get_otg_master_for_stream(
                        &dc->current_state->res_ctx, stream);

        /* Stream not found */
        if (pipe == NULL)
                return false;

        dc_exit_ips_for_hw_access(dc);

        /* By default, capture the full frame */
        param.windowa_x_start = 0;
        param.windowa_y_start = 0;
        param.windowa_x_end = pipe->stream->timing.h_addressable;
        param.windowa_y_end = pipe->stream->timing.v_addressable;
        param.windowb_x_start = 0;
        param.windowb_y_start = 0;
        param.windowb_x_end = pipe->stream->timing.h_addressable;
        param.windowb_y_end = pipe->stream->timing.v_addressable;

        if (crc_window) {
                param.windowa_x_start = crc_window->windowa_x_start;
                param.windowa_y_start = crc_window->windowa_y_start;
                param.windowa_x_end = crc_window->windowa_x_end;
                param.windowa_y_end = crc_window->windowa_y_end;
                param.windowb_x_start = crc_window->windowb_x_start;
                param.windowb_y_start = crc_window->windowb_y_start;
                param.windowb_x_end = crc_window->windowb_x_end;
                param.windowb_y_end = crc_window->windowb_y_end;
        }

        param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
        param.odm_mode = pipe->next_odm_pipe ? 1:0;

        /* Default to the union of both windows */
        param.selection = UNION_WINDOW_A_B;
        param.continuous_mode = continuous;
        param.enable = enable;

        tg = pipe->stream_res.tg;

        /* Only call if supported */
        if (tg->funcs->configure_crc)
                return tg->funcs->configure_crc(tg, &param);
        DC_LOG_WARNING("CRC capture not supported.");
        return false;
}

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 *
 * @dc: DC object.
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the red component.
 * @g_y:  CRC value for the green component.
 * @b_cb: CRC value for the blue component.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 *
 * Return:
 * %false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
                       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
        int i;
        struct pipe_ctx *pipe;
        struct timing_generator *tg;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream == stream)
                        break;
        }
        /* Stream not found */
        if (i == MAX_PIPES)
                return false;

        tg = pipe->stream_res.tg;

        if (tg->funcs->get_crc)
                return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
        DC_LOG_WARNING("CRC capture not supported.");
        return false;
}
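
/* Illustrative usage sketch (hypothetical debugfs-style caller, not part of
 * this file): enable continuous full-frame CRC capture, then read the
 * per-component values once at least one frame has completed.
 *
 *      uint32_t r_cr, g_y, b_cb;
 *
 *      if (!dc_stream_configure_crc(dc, stream, NULL, true, true))
 *              return;  // stream gone or CRC capture unsupported
 *      ...              // wait for a frame to complete
 *      if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *              ...      // consume the CRC values
 */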

void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
                enum dc_dynamic_expansion option)
{
        /* OPP FMT dyn expansion updates */
        int i;
        struct pipe_ctx *pipe_ctx;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream
                                == stream) {
                        pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
                        pipe_ctx->stream_res.opp->dyn_expansion = option;
                        pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
                                        pipe_ctx->stream_res.opp,
                                        COLOR_SPACE_YCBCR601,
                                        stream->timing.display_color_depth,
                                        stream->signal);
                }
        }
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
                enum dc_dither_option option)
{
        struct bit_depth_reduction_params params;
        struct dc_link *link = stream->link;
        struct pipe_ctx *pipes = NULL;
        int i;

        for (i = 0; i < MAX_PIPES; i++) {
                if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
                                stream) {
                        pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        break;
                }
        }

        if (!pipes)
                return;
        if (option > DITHER_OPTION_MAX)
                return;

        dc_exit_ips_for_hw_access(stream->ctx->dc);

        stream->dither_option = option;

        memset(&params, 0, sizeof(params));
        resource_build_bit_depth_reduction_params(stream, &params);
        stream->bit_depth_params = params;

        if (pipes->plane_res.xfm &&
            pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
                pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
                        pipes->plane_res.xfm,
                        pipes->plane_res.scl_data.lb_params.depth,
                        &stream->bit_depth_params);
        }

        pipes->stream_res.opp->funcs->
                opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
        int i;
        bool ret = false;
        struct pipe_ctx *pipes;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
                        pipes = &dc->current_state->res_ctx.pipe_ctx[i];
                        dc->hwss.program_gamut_remap(pipes);
                        ret = true;
                }
        }

        return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
        int i;
        bool ret = false;
        struct pipe_ctx *pipes;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream
                                == stream) {

                        pipes = &dc->current_state->res_ctx.pipe_ctx[i];
                        dc->hwss.program_output_csc(dc,
                                        pipes,
                                        stream->output_color_space,
                                        stream->csc_color_matrix.matrix,
                                        pipes->stream_res.opp->inst);
                        ret = true;
                }
        }

        return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
                struct dc_stream_state **streams,
                int num_streams,
                const struct dc_static_screen_params *params)
{
        int i, j;
        struct pipe_ctx *pipes_affected[MAX_PIPES];
        int num_pipes_affected = 0;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < num_streams; i++) {
                struct dc_stream_state *stream = streams[i];

                for (j = 0; j < MAX_PIPES; j++) {
                        if (dc->current_state->res_ctx.pipe_ctx[j].stream
                                        == stream) {
                                pipes_affected[num_pipes_affected++] =
                                                &dc->current_state->res_ctx.pipe_ctx[j];
                        }
                }
        }

        dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}

static void dc_destruct(struct dc *dc)
{
        // reset link encoder assignment table on destruct
        if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
                link_enc_cfg_init(dc, dc->current_state);

        if (dc->current_state) {
                dc_state_release(dc->current_state);
                dc->current_state = NULL;
        }

        destroy_links(dc);

        destroy_link_encoders(dc);

        if (dc->clk_mgr) {
                dc_destroy_clk_mgr(dc->clk_mgr);
                dc->clk_mgr = NULL;
        }

        dc_destroy_resource_pool(dc);

        if (dc->link_srv)
                link_destroy_link_service(&dc->link_srv);

        if (dc->ctx->gpio_service)
                dal_gpio_service_destroy(&dc->ctx->gpio_service);

        if (dc->ctx->created_bios)
                dal_bios_parser_destroy(&dc->ctx->dc_bios);

        kfree(dc->ctx->logger);
        dc_perf_trace_destroy(&dc->ctx->perf_trace);

        kfree(dc->ctx);
        dc->ctx = NULL;

        kfree(dc->bw_vbios);
        dc->bw_vbios = NULL;

        kfree(dc->bw_dceip);
        dc->bw_dceip = NULL;

        kfree(dc->dcn_soc);
        dc->dcn_soc = NULL;

        kfree(dc->dcn_ip);
        dc->dcn_ip = NULL;

        kfree(dc->vm_helper);
        dc->vm_helper = NULL;
}

static bool dc_construct_ctx(struct dc *dc,
                const struct dc_init_data *init_params)
{
        struct dc_context *dc_ctx;

        dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
        if (!dc_ctx)
                return false;

        dc_ctx->cgs_device = init_params->cgs_device;
        dc_ctx->driver_context = init_params->driver;
        dc_ctx->dc = dc;
        dc_ctx->asic_id = init_params->asic_id;
        dc_ctx->dc_sink_id_count = 0;
        dc_ctx->dc_stream_id_count = 0;
        dc_ctx->dce_environment = init_params->dce_environment;
        dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
        dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
        dc_ctx->clk_reg_offsets = init_params->clk_reg_offsets;

        /* Create logger */
        dc_ctx->logger = kmalloc(sizeof(*dc_ctx->logger), GFP_KERNEL);

        if (!dc_ctx->logger) {
                kfree(dc_ctx);
                return false;
        }

        dc_ctx->logger->dev = adev_to_drm(init_params->driver);
        dc->dml.logger = dc_ctx->logger;

        dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id);

        dc_ctx->perf_trace = dc_perf_trace_create();
        if (!dc_ctx->perf_trace) {
                kfree(dc_ctx);
                ASSERT_CRITICAL(false);
                return false;
        }

        dc->ctx = dc_ctx;

        dc->link_srv = link_create_link_service();
        if (!dc->link_srv)
                return false;

        return true;
}

static bool dc_construct(struct dc *dc,
                const struct dc_init_data *init_params)
{
        struct dc_context *dc_ctx;
        struct bw_calcs_dceip *dc_dceip;
        struct bw_calcs_vbios *dc_vbios;
        struct dcn_soc_bounding_box *dcn_soc;
        struct dcn_ip_params *dcn_ip;

        dc->config = init_params->flags;

        // Allocate memory for the vm_helper
        dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
        if (!dc->vm_helper) {
                dm_error("%s: failed to create dc->vm_helper\n", __func__);
                goto fail;
        }

        memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

        dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
        if (!dc_dceip) {
                dm_error("%s: failed to create dceip\n", __func__);
                goto fail;
        }

        dc->bw_dceip = dc_dceip;

        dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
        if (!dc_vbios) {
                dm_error("%s: failed to create vbios\n", __func__);
                goto fail;
        }

        dc->bw_vbios = dc_vbios;
        dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
        if (!dcn_soc) {
                dm_error("%s: failed to create dcn_soc\n", __func__);
                goto fail;
        }

        dc->dcn_soc = dcn_soc;

        dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
        if (!dcn_ip) {
                dm_error("%s: failed to create dcn_ip\n", __func__);
                goto fail;
        }

        dc->dcn_ip = dcn_ip;

        if (!dc_construct_ctx(dc, init_params)) {
                dm_error("%s: failed to create ctx\n", __func__);
                goto fail;
        }

        dc_ctx = dc->ctx;

        /* Resource should construct all asic specific resources.
         * This should be the only place where we need to parse the asic id
         */
        if (init_params->vbios_override)
                dc_ctx->dc_bios = init_params->vbios_override;
        else {
                /* Create BIOS parser */
                struct bp_init_data bp_init_data;

                bp_init_data.ctx = dc_ctx;
                bp_init_data.bios = init_params->asic_id.atombios_base_address;

                dc_ctx->dc_bios = dal_bios_parser_create(
                                &bp_init_data, dc_ctx->dce_version);

                if (!dc_ctx->dc_bios) {
                        ASSERT_CRITICAL(false);
                        goto fail;
                }

                dc_ctx->created_bios = true;
        }

        dc->vendor_signature = init_params->vendor_signature;

        /* Create GPIO service */
        dc_ctx->gpio_service = dal_gpio_service_create(
                        dc_ctx->dce_version,
                        dc_ctx->dce_environment,
                        dc_ctx);

        if (!dc_ctx->gpio_service) {
                ASSERT_CRITICAL(false);
                goto fail;
        }

        dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
        if (!dc->res_pool)
                goto fail;

        /* set i2c speed if not done by the respective dcnxxx_resource.c */
        if (dc->caps.i2c_speed_in_khz_hdcp == 0)
                dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
        if (dc->caps.max_optimizable_video_width == 0)
                dc->caps.max_optimizable_video_width = 5120;
        dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
        if (!dc->clk_mgr)
                goto fail;
#ifdef CONFIG_DRM_AMD_DC_FP
        dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;

        if (dc->res_pool->funcs->update_bw_bounding_box) {
                DC_FP_START();
                dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
                DC_FP_END();
        }
#endif

        if (!create_links(dc, init_params->num_virtual_links))
                goto fail;

        /* Create additional DIG link encoder objects if fewer than the platform
         * supports were created during link construction.
         */
        if (!create_link_encoders(dc))
                goto fail;

        /* Creation of current_state must occur after dc->dml
         * is initialized in dc_create_resource_pool because
         * on creation it copies the contents of dc->dml
         */
        dc->current_state = dc_state_create(dc);

        if (!dc->current_state) {
                dm_error("%s: failed to create validate ctx\n", __func__);
                goto fail;
        }

        return true;

fail:
        return false;
}

static void disable_all_writeback_pipes_for_stream(
                const struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_state *context)
{
        int i;

        for (i = 0; i < stream->num_wb_info; i++)
                stream->writeback_info[i].wb_enabled = false;
}

static void apply_ctx_interdependent_lock(struct dc *dc,
                                          struct dc_state *context,
                                          struct dc_stream_state *stream,
                                          bool lock)
{
        int i;

        /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
        if (dc->hwss.interdependent_update_lock)
                dc->hwss.interdependent_update_lock(dc, context, lock);
        else {
                for (i = 0; i < dc->res_pool->pipe_count; i++) {
                        struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
                        struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

                        // Copied conditions that were previously in dce110_apply_ctx_for_surface
                        if (stream == pipe_ctx->stream) {
                                if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) &&
                                        (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
                                        dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
                        }
                }
        }
}

static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
        if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
                memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));

                if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
                        get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
                        get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
                        get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                else {
                        if (dc->ctx->dce_version < DCN_VERSION_2_0)
                                color_space_to_black_color(
                                        dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color));
                }
                if (dc->ctx->dce_version >= DCN_VERSION_2_0) {
                        if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
                                get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                        else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
                                get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                        else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
                                get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                }
        }
}

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
        int i, j;
        struct dc_state *dangling_context = dc_state_create_current_copy(dc);
        struct dc_state *current_ctx;
        struct pipe_ctx *pipe;
        struct timing_generator *tg;

        if (dangling_context == NULL)
                return;

        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct dc_stream_state *old_stream =
                                dc->current_state->res_ctx.pipe_ctx[i].stream;
                bool should_disable = true;
                bool pipe_split_change = false;

                if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
                        (dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
                        pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
                                dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
                else
                        pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
                                dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

                for (j = 0; j < context->stream_count; j++) {
                        if (old_stream == context->streams[j]) {
                                should_disable = false;
                                break;
                        }
                }
                if (!should_disable && pipe_split_change &&
                                dc->current_state->stream_count != context->stream_count)
                        should_disable = true;

                if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
                                !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
                        struct pipe_ctx *old_pipe, *new_pipe;

                        old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                        new_pipe = &context->res_ctx.pipe_ctx[i];

                        if (old_pipe->plane_state && !new_pipe->plane_state)
                                should_disable = true;
                }

                if (should_disable && old_stream) {
                        bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;
                        pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                        tg = pipe->stream_res.tg;
                        /* When disabling plane for a phantom pipe, we must turn on the
                         * phantom OTG so the disable programming gets the double buffer
                         * update. Otherwise the pipe will be left in a partially disabled
                         * state that can result in underflow or hang when enabling it
                         * again for different use.
                         */
                        if (is_phantom) {
                                if (tg->funcs->enable_crtc) {
                                        int main_pipe_width, main_pipe_height;
                                        struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream);

                                        main_pipe_width = old_paired_stream->dst.width;
                                        main_pipe_height = old_paired_stream->dst.height;
                                        if (dc->hwss.blank_phantom)
                                                dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
                                        tg->funcs->enable_crtc(tg);
                                }
                        }

                        if (is_phantom)
                                dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);
                        else
                                dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
                        disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

                        if (pipe->stream && pipe->plane_state) {
                                set_p_state_switch_method(dc, context, pipe);
                                dc_update_visual_confirm_color(dc, context, pipe);
                        }

                        if (dc->hwss.apply_ctx_for_surface) {
                                apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
                                dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
                                apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
                                dc->hwss.post_unlock_program_front_end(dc, dangling_context);
                        }
                        if (dc->hwss.program_front_end_for_ctx) {
                                dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
                                dc->hwss.program_front_end_for_ctx(dc, dangling_context);
                                dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
                                dc->hwss.post_unlock_program_front_end(dc, dangling_context);
                        }
                        /* We need to put the phantom OTG back into its default (disabled) state or we
                         * can get corruption when transitioning from one SubVP config to a different one.
                         * The OTG is set to disable on falling edge of VUPDATE so the plane disable
                         * will still get its double buffer update.
                         */
                        if (is_phantom) {
                                if (tg->funcs->disable_phantom_crtc)
                                        tg->funcs->disable_phantom_crtc(tg);
                        }
1236                 }
1237         }
1238
1239         current_ctx = dc->current_state;
1240         dc->current_state = dangling_context;
1241         dc_state_release(current_ctx);
1242 }
1243
1244 static void disable_vbios_mode_if_required(
1245                 struct dc *dc,
1246                 struct dc_state *context)
1247 {
1248         unsigned int i, j;
1249
1250         /* check if timing_changed, disable stream*/
1251         for (i = 0; i < dc->res_pool->pipe_count; i++) {
1252                 struct dc_stream_state *stream = NULL;
1253                 struct dc_link *link = NULL;
1254                 struct pipe_ctx *pipe = NULL;
1255
1256                 pipe = &context->res_ctx.pipe_ctx[i];
1257                 stream = pipe->stream;
1258                 if (stream == NULL)
1259                         continue;
1260
1261                 if (stream->apply_seamless_boot_optimization)
1262                         continue;
1263
1264                 // only looking for first odm pipe
1265                 if (pipe->prev_odm_pipe)
1266                         continue;
1267
1268                 if (stream->link->local_sink &&
1269                         stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
1270                         link = stream->link;
1271                 }
1272
1273                 if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
1274                         unsigned int enc_inst, tg_inst = 0;
1275                         unsigned int pix_clk_100hz;
1276
1277                         enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1278                         if (enc_inst != ENGINE_ID_UNKNOWN) {
1279                                 for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
1280                                         if (dc->res_pool->stream_enc[j]->id == enc_inst) {
1281                                                 tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
1282                                                         dc->res_pool->stream_enc[j]);
1283                                                 break;
1284                                         }
1285                                 }
1286
1287                                 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1288                                         dc->res_pool->dp_clock_source,
1289                                         tg_inst, &pix_clk_100hz);
1290
1291                                 if (link->link_status.link_active) {
1292                                         uint32_t requested_pix_clk_100hz =
1293                                                 pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
1294
1295                                         if (pix_clk_100hz != requested_pix_clk_100hz) {
1296                                                 dc->link_srv->set_dpms_off(pipe);
1297                                                 pipe->stream->dpms_off = false;
1298                                         }
1299                                 }
1300                         }
1301                 }
1302         }
1303 }
1304
1305 /**
1306  * wait_for_blank_complete - wait for all active OPPs to finish pending blank
1307  * pattern updates
1308  *
1309  * @dc: [in] dc reference
1310  * @context: [in] hardware context in use
1311  */
1312 static void wait_for_blank_complete(struct dc *dc,
1313                 struct dc_state *context)
1314 {
1315         struct pipe_ctx *opp_head;
1316         struct dce_hwseq *hws = dc->hwseq;
1317         int i;
1318
1319         if (!hws->funcs.wait_for_blank_complete)
1320                 return;
1321
1322         for (i = 0; i < MAX_PIPES; i++) {
1323                 opp_head = &context->res_ctx.pipe_ctx[i];
1324
1325                 if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
1326                                 dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
1327                         continue;
1328
1329                 hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
1330         }
1331 }
1332
1333 static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
1334 {
1335         struct pipe_ctx *otg_master;
1336         struct timing_generator *tg;
1337         int i;
1338
1339         for (i = 0; i < MAX_PIPES; i++) {
1340                 otg_master = &context->res_ctx.pipe_ctx[i];
1341                 if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
1342                                 dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
1343                         continue;
1344                 tg = otg_master->stream_res.tg;
1345                 if (tg->funcs->wait_odm_doublebuffer_pending_clear)
1346                         tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
1347         }
1348
1349         /* ODM update may require to reprogram blank pattern for each OPP */
1350         wait_for_blank_complete(dc, context);
1351 }
1352
1353 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
1354 {
1355         int i;
1356         PERF_TRACE();
1357         for (i = 0; i < MAX_PIPES; i++) {
1358                 int count = 0;
1359                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1360
1361                 if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
1362                         continue;
1363
1364                 /* Timeout 100 ms */
1365                 while (count < 100000) {
1366                         /* Must set to false to start with, due to OR in update function */
1367                         pipe->plane_state->status.is_flip_pending = false;
1368                         dc->hwss.update_pending_status(pipe);
1369                         if (!pipe->plane_state->status.is_flip_pending)
1370                                 break;
1371                         udelay(1);
1372                         count++;
1373                 }
1374                 ASSERT(!pipe->plane_state->status.is_flip_pending);
1375         }
1376         PERF_TRACE();
1377 }
1378
1379 /* Public functions */
1380
1381 struct dc *dc_create(const struct dc_init_data *init_params)
1382 {
1383         struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
1384         unsigned int full_pipe_count;
1385
1386         if (!dc)
1387                 return NULL;
1388
1389         if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
1390                 if (!dc_construct_ctx(dc, init_params))
1391                         goto destruct_dc;
1392         } else {
1393                 if (!dc_construct(dc, init_params))
1394                         goto destruct_dc;
1395
1396                 full_pipe_count = dc->res_pool->pipe_count;
1397                 if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
1398                         full_pipe_count--;
1399                 dc->caps.max_streams = min(
1400                                 full_pipe_count,
1401                                 dc->res_pool->stream_enc_count);
1402
1403                 dc->caps.max_links = dc->link_count;
1404                 dc->caps.max_audios = dc->res_pool->audio_count;
1405                 dc->caps.linear_pitch_alignment = 64;
1406
1407                 dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
1408
1409                 dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
1410
1411                 if (dc->res_pool->dmcu != NULL)
1412                         dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
1413         }
1414
1415         dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
1416         dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
1417         dc->clk_reg_offsets = init_params->clk_reg_offsets;
1418
1419         /* Populate versioning information */
1420         dc->versions.dc_ver = DC_VER;
1421
1422         dc->build_id = DC_BUILD_ID;
1423
1424         DC_LOG_DC("Display Core initialized\n");
1425
1428         return dc;
1429
1430 destruct_dc:
1431         dc_destruct(dc);
1432         kfree(dc);
1433         return NULL;
1434 }
1435
1436 static void detect_edp_presence(struct dc *dc)
1437 {
1438         struct dc_link *edp_links[MAX_NUM_EDP];
1439         struct dc_link *edp_link = NULL;
1440         enum dc_connection_type type;
1441         int i;
1442         int edp_num;
1443
1444         dc_get_edp_links(dc, edp_links, &edp_num);
1445         if (!edp_num)
1446                 return;
1447
1448         for (i = 0; i < edp_num; i++) {
1449                 edp_link = edp_links[i];
1450                 if (dc->config.edp_not_connected) {
1451                         edp_link->edp_sink_present = false;
1452                 } else {
1453                         dc_link_detect_connection_type(edp_link, &type);
1454                         edp_link->edp_sink_present = (type != dc_connection_none);
1455                 }
1456         }
1457 }
1458
1459 void dc_hardware_init(struct dc *dc)
1460 {
1462         detect_edp_presence(dc);
1463         if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
1464                 dc->hwss.init_hw(dc);
1465 }
1466
1467 void dc_init_callbacks(struct dc *dc,
1468                 const struct dc_callback_init *init_params)
1469 {
1470         dc->ctx->cp_psp = init_params->cp_psp;
1471 }
1472
1473 void dc_deinit_callbacks(struct dc *dc)
1474 {
1475         memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
1476 }
1477
1478 void dc_destroy(struct dc **dc)
1479 {
1480         dc_destruct(*dc);
1481         kfree(*dc);
1482         *dc = NULL;
1483 }
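
/*
 * Typical lifecycle of the public entry points above, as the OS-dependent
 * DM layer would drive them. Illustrative sketch only; init_data and
 * callback_init are assumed to be filled in by the caller:
 *
 *	struct dc *dc = dc_create(&init_data);
 *
 *	if (!dc)
 *		return -ENOMEM;
 *	dc_init_callbacks(dc, &callback_init);
 *	dc_hardware_init(dc);
 *	// ... detection, commits, surface updates ...
 *	dc_deinit_callbacks(dc);
 *	dc_destroy(&dc);	// frees the struct and NULLs the pointer
 */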
1484
1485 static void enable_timing_multisync(
1486                 struct dc *dc,
1487                 struct dc_state *ctx)
1488 {
1489         int i, multisync_count = 0;
1490         int pipe_count = dc->res_pool->pipe_count;
1491         struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
1492
1493         for (i = 0; i < pipe_count; i++) {
1494                 if (!ctx->res_ctx.pipe_ctx[i].stream ||
1495                                 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
1496                         continue;
1497                 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
1498                         continue;
1499                 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
1500                 multisync_count++;
1501         }
1502
1503         if (multisync_count > 0) {
1504                 dc->hwss.enable_per_frame_crtc_position_reset(
1505                         dc, multisync_count, multisync_pipes);
1506         }
1507 }
1508
1509 static void program_timing_sync(
1510                 struct dc *dc,
1511                 struct dc_state *ctx)
1512 {
1513         int i, j, k;
1514         int group_index = 0;
1515         int num_group = 0;
1516         int pipe_count = dc->res_pool->pipe_count;
1517         struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
1518
1519         for (i = 0; i < pipe_count; i++) {
1520                 if (!ctx->res_ctx.pipe_ctx[i].stream
1521                                 || ctx->res_ctx.pipe_ctx[i].top_pipe
1522                                 || ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
1523                         continue;
1524
1525                 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
1526         }
1527
1528         for (i = 0; i < pipe_count; i++) {
1529                 int group_size = 1;
1530                 enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
1531                 struct pipe_ctx *pipe_set[MAX_PIPES];
1532
1533                 if (!unsynced_pipes[i])
1534                         continue;
1535
1536                 pipe_set[0] = unsynced_pipes[i];
1537                 unsynced_pipes[i] = NULL;
1538
1539                 /* Add this tg to the set, then search the remaining tgs for
1540                  * ones with the same timing and add them all to the group
1541                  */
1542                 for (j = i + 1; j < pipe_count; j++) {
1543                         if (!unsynced_pipes[j])
1544                                 continue;
1545                         if (sync_type != TIMING_SYNCHRONIZABLE &&
1546                                 dc->hwss.enable_vblanks_synchronization &&
1547                                 unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
1548                                 resource_are_vblanks_synchronizable(
1549                                         unsynced_pipes[j]->stream,
1550                                         pipe_set[0]->stream)) {
1551                                 sync_type = VBLANK_SYNCHRONIZABLE;
1552                                 pipe_set[group_size] = unsynced_pipes[j];
1553                                 unsynced_pipes[j] = NULL;
1554                                 group_size++;
1555                         } else
1556                         if (sync_type != VBLANK_SYNCHRONIZABLE &&
1557                                 resource_are_streams_timing_synchronizable(
1558                                         unsynced_pipes[j]->stream,
1559                                         pipe_set[0]->stream)) {
1560                                 sync_type = TIMING_SYNCHRONIZABLE;
1561                                 pipe_set[group_size] = unsynced_pipes[j];
1562                                 unsynced_pipes[j] = NULL;
1563                                 group_size++;
1564                         }
1565                 }
1566
1567                 /* set first unblanked pipe as master */
1568                 for (j = 0; j < group_size; j++) {
1569                         bool is_blanked;
1570
1571                         if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1572                                 is_blanked =
1573                                         pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1574                         else
1575                                 is_blanked =
1576                                         pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1577                         if (!is_blanked) {
1578                                 if (j == 0)
1579                                         break;
1580
1581                                 swap(pipe_set[0], pipe_set[j]);
1582                                 break;
1583                         }
1584                 }
1585
1586                 for (k = 0; k < group_size; k++) {
1587                         struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);
1588
1589                         status->timing_sync_info.group_id = num_group;
1590                         status->timing_sync_info.group_size = group_size;
1591                         if (k == 0)
1592                                 status->timing_sync_info.master = true;
1593                         else
1594                                 status->timing_sync_info.master = false;
1595
1596                 }
1597
1598                 /* remove any other pipes that have already been synced */
1599                 if (dc->config.use_pipe_ctx_sync_logic) {
1600                         /* check each pipe's syncd field to decide which pipes to remove */
1601                         for (j = 1; j < group_size; j++) {
1602                                 if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
1603                                         group_size--;
1604                                         pipe_set[j] = pipe_set[group_size];
1605                                         j--;
1606                                 } else
1607                                         /* link slave pipe's syncd with master pipe */
1608                                         pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
1609                         }
1610                 } else {
1611                         /* remove remaining unblanked pipes (j resumes from the master search above) */
1612                         for (j = j + 1; j < group_size; j++) {
1613                                 bool is_blanked;
1614
1615                                 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1616                                         is_blanked =
1617                                                 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1618                                 else
1619                                         is_blanked =
1620                                                 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1621                                 if (!is_blanked) {
1622                                         group_size--;
1623                                         pipe_set[j] = pipe_set[group_size];
1624                                         j--;
1625                                 }
1626                         }
1627                 }
1628
1629                 if (group_size > 1) {
1630                         if (sync_type == TIMING_SYNCHRONIZABLE) {
1631                                 dc->hwss.enable_timing_synchronization(
1632                                         dc, ctx, group_index, group_size, pipe_set);
1633                         } else
1634                                 if (sync_type == VBLANK_SYNCHRONIZABLE) {
1635                                 dc->hwss.enable_vblanks_synchronization(
1636                                         dc, group_index, group_size, pipe_set);
1637                                 }
1638                         group_index++;
1639                 }
1640                 num_group++;
1641         }
1642 }
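
/*
 * Worked example for program_timing_sync(): given three OTG-master pipes
 * with identical timings and a fourth with a different mode, the loops
 * above build one group of size 3 (the first unblanked member is swapped
 * to index 0 and becomes master) plus a singleton group. The singleton
 * fails the group_size > 1 check, so only one
 * enable_timing_synchronization() call is issued.
 */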
1643
1644 static bool streams_changed(struct dc *dc,
1645                             struct dc_stream_state *streams[],
1646                             uint8_t stream_count)
1647 {
1648         uint8_t i;
1649
1650         if (stream_count != dc->current_state->stream_count)
1651                 return true;
1652
1653         for (i = 0; i < dc->current_state->stream_count; i++) {
1654                 if (dc->current_state->streams[i] != streams[i])
1655                         return true;
1656                 if (!streams[i]->link->link_state_valid)
1657                         return true;
1658         }
1659
1660         return false;
1661 }
1662
1663 bool dc_validate_boot_timing(const struct dc *dc,
1664                                 const struct dc_sink *sink,
1665                                 struct dc_crtc_timing *crtc_timing)
1666 {
1667         struct timing_generator *tg;
1668         struct stream_encoder *se = NULL;
1669
1670         struct dc_crtc_timing hw_crtc_timing = {0};
1671
1672         struct dc_link *link = sink->link;
1673         unsigned int i, enc_inst, tg_inst = 0;
1674
1675         /* Support seamless boot on eDP displays only */
1676         if (sink->sink_signal != SIGNAL_TYPE_EDP) {
1677                 return false;
1678         }
1679
1680         if (dc->debug.force_odm_combine)
1681                 return false;
1682
1683         /* Check for enabled DIG to identify enabled display */
1684         if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
1685                 return false;
1686
1687         enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1688
1689         if (enc_inst == ENGINE_ID_UNKNOWN)
1690                 return false;
1691
1692         for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
1693                 if (dc->res_pool->stream_enc[i]->id == enc_inst) {
1694
1695                         se = dc->res_pool->stream_enc[i];
1696
1697                         tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
1698                                 dc->res_pool->stream_enc[i]);
1699                         break;
1700                 }
1701         }
1702
1703         // stream encoder with enc_inst not found
1704         if (i == dc->res_pool->stream_enc_count)
1705                 return false;
1706
1707         if (tg_inst >= dc->res_pool->timing_generator_count)
1708                 return false;
1709
1710         if (tg_inst != link->link_enc->preferred_engine)
1711                 return false;
1712
1713         tg = dc->res_pool->timing_generators[tg_inst];
1714
1715         if (!tg->funcs->get_hw_timing)
1716                 return false;
1717
1718         if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
1719                 return false;
1720
1721         if (crtc_timing->h_total != hw_crtc_timing.h_total)
1722                 return false;
1723
1724         if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
1725                 return false;
1726
1727         if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
1728                 return false;
1729
1730         if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
1731                 return false;
1732
1733         if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
1734                 return false;
1735
1736         if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
1737                 return false;
1738
1739         if (crtc_timing->v_total != hw_crtc_timing.v_total)
1740                 return false;
1741
1742         if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
1743                 return false;
1744
1745         if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
1746                 return false;
1747
1748         if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
1749                 return false;
1750
1751         if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
1752                 return false;
1753
1754         if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
1755                 return false;
1756
1757         /* block DSC for now, as VBIOS does not currently support DSC timings */
1758         if (crtc_timing->flags.DSC)
1759                 return false;
1760
1761         if (dc_is_dp_signal(link->connector_signal)) {
1762                 unsigned int pix_clk_100hz;
1763                 uint32_t numOdmPipes = 1;
1764                 uint32_t id_src[4] = {0};
1765
1766                 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1767                         dc->res_pool->dp_clock_source,
1768                         tg_inst, &pix_clk_100hz);
1769
1770                 if (tg->funcs->get_optc_source)
1771                         tg->funcs->get_optc_source(tg,
1772                                                 &numOdmPipes, &id_src[0], &id_src[1]);
1773
1774                 if (numOdmPipes == 2)
1775                         pix_clk_100hz *= 2;
1776                 if (numOdmPipes == 4)
1777                         pix_clk_100hz *= 4;
1778
1779                 // Note: In rare cases, HW pixclk may differ from crtc's pixclk
1780                 // slightly due to rounding issues in 10 kHz units.
1781                 if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
1782                         return false;
1783
1784                 if (!se->funcs->dp_get_pixel_format)
1785                         return false;
1786
1787                 if (!se->funcs->dp_get_pixel_format(
1788                         se,
1789                         &hw_crtc_timing.pixel_encoding,
1790                         &hw_crtc_timing.display_color_depth))
1791                         return false;
1792
1793                 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
1794                         return false;
1795
1796                 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
1797                         return false;
1798         }
1799
1800         if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
1801                 return false;
1802         }
1803
1804         if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
1805                 DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
1806                 return false;
1807         }
1808
1809         return true;
1810 }
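
/*
 * Hedged usage sketch: a DM deciding whether a firmware-lit eDP panel can
 * be taken over without a full modeset ("seamless boot"). The sink and
 * crtc_timing values are assumed to come from the DM's detection path:
 *
 *	if (dc_validate_boot_timing(dc, sink, &crtc_timing))
 *		stream->apply_seamless_boot_optimization = true;
 *	// otherwise fall through to a normal full modeset
 */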
1811
1812 static inline bool should_update_pipe_for_stream(
1813                 struct dc_state *context,
1814                 struct pipe_ctx *pipe_ctx,
1815                 struct dc_stream_state *stream)
1816 {
1817         return (pipe_ctx->stream && pipe_ctx->stream == stream);
1818 }
1819
1820 static inline bool should_update_pipe_for_plane(
1821                 struct dc_state *context,
1822                 struct pipe_ctx *pipe_ctx,
1823                 struct dc_plane_state *plane_state)
1824 {
1825         return (pipe_ctx->plane_state == plane_state);
1826 }
1827
1828 void dc_enable_stereo(
1829         struct dc *dc,
1830         struct dc_state *context,
1831         struct dc_stream_state *streams[],
1832         uint8_t stream_count)
1833 {
1834         int i, j;
1835         struct pipe_ctx *pipe;
1836
1837         dc_exit_ips_for_hw_access(dc);
1838
1839         for (i = 0; i < MAX_PIPES; i++) {
1840                 if (context != NULL) {
1841                         pipe = &context->res_ctx.pipe_ctx[i];
1842                 } else {
1843                         context = dc->current_state;
1844                         pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1845                 }
1846
1847                 for (j = 0; pipe && j < stream_count; j++)  {
1848                         if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
1849                                 dc->hwss.setup_stereo)
1850                                 dc->hwss.setup_stereo(pipe, dc);
1851                 }
1852         }
1853 }
1854
1855 void dc_trigger_sync(struct dc *dc, struct dc_state *context)
1856 {
1857         if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
1858                 dc_exit_ips_for_hw_access(dc);
1859
1860                 enable_timing_multisync(dc, context);
1861                 program_timing_sync(dc, context);
1862         }
1863 }
1864
1865 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
1866 {
1867         int i;
1868         unsigned int stream_mask = 0;
1869
1870         for (i = 0; i < dc->res_pool->pipe_count; i++) {
1871                 if (context->res_ctx.pipe_ctx[i].stream)
1872                         stream_mask |= 1 << i;
1873         }
1874
1875         return stream_mask;
1876 }
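
/*
 * Worked example: with streams on pipes 0 and 2 only, the loop above
 * yields stream_mask = (1 << 0) | (1 << 2) = 0x5. The mask is forwarded
 * to DMUB firmware when it changes (see dc_commit_state_no_check below).
 */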
1877
1878 void dc_z10_restore(const struct dc *dc)
1879 {
1880         if (dc->hwss.z10_restore)
1881                 dc->hwss.z10_restore(dc);
1882 }
1883
1884 void dc_z10_save_init(struct dc *dc)
1885 {
1886         if (dc->hwss.z10_save_init)
1887                 dc->hwss.z10_save_init(dc);
1888 }
1889
1890 /**
1891  * dc_commit_state_no_check - Apply context to the hardware
1892  *
1893  * @dc: DC object with the current status to be updated
1894  * @context: New state that will become the current status at the end of this function
1895  *
1896  * Applies the given context to the hardware and copies it into the current
1897  * context. It is up to the caller to release the source context afterwards.
1898  *
1899  * Return: an enum dc_status result code for the operation
1900  */
1901 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
1902 {
1903         struct dc_bios *dcb = dc->ctx->dc_bios;
1904         enum dc_status result = DC_ERROR_UNEXPECTED;
1905         struct pipe_ctx *pipe;
1906         int i, k, l;
1907         struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
1908         struct dc_state *old_state;
1909         bool subvp_prev_use = false;
1910
1911         dc_z10_restore(dc);
1912         dc_allow_idle_optimizations(dc, false);
1913
1914         for (i = 0; i < dc->res_pool->pipe_count; i++) {
1915                 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1916
1917                 /* Check old context for SubVP */
1918                 subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
1919                 if (subvp_prev_use)
1920                         break;
1921         }
1922
1923         for (i = 0; i < context->stream_count; i++)
1924                 dc_streams[i] =  context->streams[i];
1925
1926         if (!dcb->funcs->is_accelerated_mode(dcb)) {
1927                 disable_vbios_mode_if_required(dc, context);
1928                 dc->hwss.enable_accelerated_mode(dc, context);
1929         }
1930
1931         if (context->stream_count > get_seamless_boot_stream_count(context) ||
1932                 context->stream_count == 0)
1933                 dc->hwss.prepare_bandwidth(dc, context);
1934
1935         /* When SubVP is active, all HW programming must be done while the
1936          * SubVP lock is held
1937          */
1938         if (dc->hwss.subvp_pipe_control_lock)
1939                 dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
1940
1941         if (dc->hwss.update_dsc_pg)
1942                 dc->hwss.update_dsc_pg(dc, context, false);
1943
1944         disable_dangling_plane(dc, context);
1945         /* re-program planes for existing streams, in case we need to
1946          * free up plane resources for later use
1947          */
1948         if (dc->hwss.apply_ctx_for_surface) {
1949                 for (i = 0; i < context->stream_count; i++) {
1950                         if (context->streams[i]->mode_changed)
1951                                 continue;
1952                         apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1953                         dc->hwss.apply_ctx_for_surface(
1954                                 dc, context->streams[i],
1955                                 context->stream_status[i].plane_count,
1956                                 context); /* use new pipe config in new context */
1957                         apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1958                         dc->hwss.post_unlock_program_front_end(dc, context);
1959                 }
1960         }
1961
1962         /* Program hardware */
1963         for (i = 0; i < dc->res_pool->pipe_count; i++) {
1964                 pipe = &context->res_ctx.pipe_ctx[i];
1965                 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
1966         }
1967
1968         result = dc->hwss.apply_ctx_to_hw(dc, context);
1969
1970         if (result != DC_OK) {
1971                 /* Application of dc_state to hardware stopped. */
1972                 dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
1973                 return result;
1974         }
1975
1976         dc_trigger_sync(dc, context);
1977
1978         /* Full update should unconditionally be triggered when dc_commit_state_no_check is called */
1979         for (i = 0; i < context->stream_count; i++) {
1980                 uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed;
1981
1982                 context->streams[i]->update_flags.raw = 0xFFFFFFFF;
1983                 context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed;
1984         }
1985
1986         /* Program all planes within new context */
1987         if (dc->hwss.program_front_end_for_ctx) {
1988                 dc->hwss.interdependent_update_lock(dc, context, true);
1989                 dc->hwss.program_front_end_for_ctx(dc, context);
1990                 dc->hwss.interdependent_update_lock(dc, context, false);
1991                 dc->hwss.post_unlock_program_front_end(dc, context);
1992         }
1993
1994         if (dc->hwss.commit_subvp_config)
1995                 dc->hwss.commit_subvp_config(dc, context);
1996         if (dc->hwss.subvp_pipe_control_lock)
1997                 dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
1998
1999         for (i = 0; i < context->stream_count; i++) {
2000                 const struct dc_link *link = context->streams[i]->link;
2001
2002                 if (!context->streams[i]->mode_changed)
2003                         continue;
2004
2005                 if (dc->hwss.apply_ctx_for_surface) {
2006                         apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
2007                         dc->hwss.apply_ctx_for_surface(
2008                                         dc, context->streams[i],
2009                                         context->stream_status[i].plane_count,
2010                                         context);
2011                         apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
2012                         dc->hwss.post_unlock_program_front_end(dc, context);
2013                 }
2014
2015                 /*
2016                  * enable stereo
2017                  * TODO rework dc_enable_stereo call to work with validation sets?
2018                  */
2019                 for (k = 0; k < MAX_PIPES; k++) {
2020                         pipe = &context->res_ctx.pipe_ctx[k];
2021
2022                         for (l = 0 ; pipe && l < context->stream_count; l++)  {
2023                                 if (context->streams[l] &&
2024                                         context->streams[l] == pipe->stream &&
2025                                         dc->hwss.setup_stereo)
2026                                         dc->hwss.setup_stereo(pipe, dc);
2027                         }
2028                 }
2029
2030                 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
2031                                 context->streams[i]->timing.h_addressable,
2032                                 context->streams[i]->timing.v_addressable,
2033                                 context->streams[i]->timing.h_total,
2034                                 context->streams[i]->timing.v_total,
2035                                 context->streams[i]->timing.pix_clk_100hz / 10);
2036         }
2037
2038         dc_enable_stereo(dc, context, dc_streams, context->stream_count);
2039
2040         if (context->stream_count > get_seamless_boot_stream_count(context) ||
2041                 context->stream_count == 0) {
2042                 /* Must wait until no flips are pending before optimizing bandwidth */
2043                 wait_for_no_pipes_pending(dc, context);
2044                 /*
2045                  * The optimized dispclk depends on the ODM setup, so wait for any
2046                  * pending ODM update to complete before optimizing bandwidth.
2047                  */
2048                 wait_for_odm_update_pending_complete(dc, context);
2049                 /* pplib is notified if disp_num changed */
2050                 dc->hwss.optimize_bandwidth(dc, context);
2051                 /* Re-do OTG sync, as the OTG could be out of sync due to the OTG
2052                  * workaround applied during the clock update
2053                  */
2054                 dc_trigger_sync(dc, context);
2055         }
2056
2057         if (dc->hwss.update_dsc_pg)
2058                 dc->hwss.update_dsc_pg(dc, context, true);
2059
2060         if (dc->ctx->dce_version >= DCE_VERSION_MAX)
2061                 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
2062         else
2063                 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
2064
2065         context->stream_mask = get_stream_mask(dc, context);
2066
2067         if (context->stream_mask != dc->current_state->stream_mask)
2068                 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
2069
2070         for (i = 0; i < context->stream_count; i++)
2071                 context->streams[i]->mode_changed = false;
2072
2073         /* Clear update flags that were set earlier to avoid redundant programming */
2074         for (i = 0; i < context->stream_count; i++) {
2075                 context->streams[i]->update_flags.raw = 0x0;
2076         }
2077
2078         old_state = dc->current_state;
2079         dc->current_state = context;
2080
2081         dc_state_release(old_state);
2082
2083         dc_state_retain(dc->current_state);
2084
2085         return result;
2086 }
2087
2088 static bool commit_minimal_transition_state_legacy(struct dc *dc,
2089                 struct dc_state *transition_base_context);
2090
2091 /**
2092  * dc_commit_streams - Commit current stream state
2093  *
2094  * @dc: DC object with the commit state to be configured in the hardware
2095  * @streams: Array with a list of stream states
2096  * @stream_count: Total number of streams
2097  *
2098  * Function responsible for committing the stream changes to the hardware.
2099  *
2100  * Return:
2101  * DC_OK if everything works as expected, otherwise a dc_status error code.
2103  */
2104 enum dc_status dc_commit_streams(struct dc *dc,
2105                                  struct dc_stream_state *streams[],
2106                                  uint8_t stream_count)
2107 {
2108         int i, j;
2109         struct dc_state *context;
2110         enum dc_status res = DC_OK;
2111         struct dc_validation_set set[MAX_STREAMS] = {0};
2112         struct pipe_ctx *pipe;
2113         bool handle_exit_odm2to1 = false;
2114
2115         if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
2116                 return res;
2117
2118         if (!streams_changed(dc, streams, stream_count))
2119                 return res;
2120
2121         dc_exit_ips_for_hw_access(dc);
2122
2123         DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
2124
2125         for (i = 0; i < stream_count; i++) {
2126                 struct dc_stream_state *stream = streams[i];
2127                 struct dc_stream_status *status = dc_stream_get_status(stream);
2128
2129                 dc_stream_log(dc, stream);
2130
2131                 set[i].stream = stream;
2132
2133                 if (status) {
2134                         set[i].plane_count = status->plane_count;
2135                         for (j = 0; j < status->plane_count; j++)
2136                                 set[i].plane_states[j] = status->plane_states[j];
2137                 }
2138         }
2139
2140         /* The ODM Combine 2:1 power optimization is only applied in the
2141          * single-stream scenario; it uses more pipes than needed to reduce power
2142          * consumption. We need to switch this feature off to make room for new
2143          * streams.
2144          */
2144         if (stream_count > dc->current_state->stream_count &&
2145                         dc->current_state->stream_count == 1) {
2146                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2147                         pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2148                         if (pipe->next_odm_pipe)
2149                                 handle_exit_odm2to1 = true;
2150                 }
2151         }
2152
2153         if (handle_exit_odm2to1)
2154                 res = commit_minimal_transition_state_legacy(dc, dc->current_state);
2155
2156         context = dc_state_create_current_copy(dc);
2157         if (!context)
2158                 goto context_alloc_fail;
2159
2160         res = dc_validate_with_context(dc, set, stream_count, context, false);
2161         if (res != DC_OK) {
2162                 BREAK_TO_DEBUGGER();
2163                 goto fail;
2164         }
2165
2166         res = dc_commit_state_no_check(dc, context);
2167
2168         for (i = 0; i < stream_count; i++) {
2169                 for (j = 0; j < context->stream_count; j++) {
2170                         if (streams[i]->stream_id == context->streams[j]->stream_id)
2171                                 streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
2172
2173                         if (dc_is_embedded_signal(streams[i]->signal)) {
2174                                 struct dc_stream_status *status = dc_state_get_stream_status(context, streams[i]);
2175
2176                                 if (dc->hwss.is_abm_supported)
2177                                         status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
2178                                 else
2179                                         status->is_abm_supported = true;
2180                         }
2181                 }
2182         }
2183
2184 fail:
2185         dc_state_release(context);
2186
2187 context_alloc_fail:
2188
2189         DC_LOG_DC("%s Finished.\n", __func__);
2190
2191         return res;
2192 }
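
/*
 * Illustrative call sequence for dc_commit_streams(); the array contents
 * are whatever configuration the DM has built. On DC_OK the new state has
 * been applied to hardware and is now dc->current_state:
 *
 *	struct dc_stream_state *streams[1] = { new_stream };
 *
 *	if (dc_commit_streams(dc, streams, 1) != DC_OK)
 *		DC_LOG_WARNING("stream commit failed\n");
 */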
2193
2194 bool dc_acquire_release_mpc_3dlut(
2195                 struct dc *dc, bool acquire,
2196                 struct dc_stream_state *stream,
2197                 struct dc_3dlut **lut,
2198                 struct dc_transfer_func **shaper)
2199 {
2200         int pipe_idx;
2201         bool ret = false;
2202         bool found_pipe_idx = false;
2203         const struct resource_pool *pool = dc->res_pool;
2204         struct resource_context *res_ctx = &dc->current_state->res_ctx;
2205         int mpcc_id = 0;
2206
2207         if (pool && res_ctx) {
2208                 if (acquire) {
2209                         /* find pipe idx for the given stream */
2210                         for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
2211                                 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
2212                                         found_pipe_idx = true;
2213                                         mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
2214                                         break;
2215                                 }
2216                         }
2217                 } else
2218                         found_pipe_idx = true; /* for release, pipe_idx is not required */
2219
2220                 if (found_pipe_idx) {
2221                         if (acquire && pool->funcs->acquire_post_bldn_3dlut)
2222                                 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
2223                         else if (!acquire && pool->funcs->release_post_bldn_3dlut)
2224                                 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
2225                 }
2226         }
2227         return ret;
2228 }
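
/*
 * Sketch of the acquire/release pairing for the post-blend MPC 3D LUT.
 * lut and shaper are out-parameters on acquire; stream is only used to
 * find the pipe during the acquire step:
 *
 *	struct dc_3dlut *lut = NULL;
 *	struct dc_transfer_func *shaper = NULL;
 *
 *	if (dc_acquire_release_mpc_3dlut(dc, true, stream, &lut, &shaper)) {
 *		// ... program lut/shaper for the stream ...
 *		dc_acquire_release_mpc_3dlut(dc, false, stream, &lut, &shaper);
 *	}
 */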
2229
2230 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
2231 {
2232         int i;
2233         struct pipe_ctx *pipe;
2234
2235         for (i = 0; i < MAX_PIPES; i++) {
2236                 pipe = &context->res_ctx.pipe_ctx[i];
2237
2238                 // Don't check flip pending on phantom pipes
2239                 if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM))
2240                         continue;
2241
2242                 /* Must set to false to start with, due to OR in update function */
2243                 pipe->plane_state->status.is_flip_pending = false;
2244                 dc->hwss.update_pending_status(pipe);
2245                 if (pipe->plane_state->status.is_flip_pending)
2246                         return true;
2247         }
2248         return false;
2249 }
2250
2251 /* Perform updates here which need to be deferred until the next vupdate.
2252  *
2253  * i.e. the blnd LUT, 3D LUT, and shaper LUT bypass registers are double
2254  * buffered, but forcing LUT memory into the shutdown state takes effect
2255  * immediately. This causes single-frame corruption, as the LUT is disabled
2256  * mid-frame, unless the shutdown is deferred until after bypass is entered.
2257  */
2258 static void process_deferred_updates(struct dc *dc)
2259 {
2260         int i = 0;
2261
2262         if (dc->debug.enable_mem_low_power.bits.cm) {
2263                 ASSERT(dc->dcn_ip->max_num_dpp);
2264                 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
2265                         if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
2266                                 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
2267         }
2268 }
2269
2270 void dc_post_update_surfaces_to_stream(struct dc *dc)
2271 {
2272         int i;
2273         struct dc_state *context = dc->current_state;
2274
2275         if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
2276                 return;
2277
2278         post_surface_trace(dc);
2279
2280         /*
2281          * Only relevant for DCN behavior where we can guarantee the optimization
2282          * is safe to apply - retain the legacy behavior for DCE.
2283          */
2284
2285         if (dc->ctx->dce_version < DCE_VERSION_MAX)
2286                 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
2287         else {
2288                 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
2289
2290                 if (is_flip_pending_in_pipes(dc, context))
2291                         return;
2292
2293                 for (i = 0; i < dc->res_pool->pipe_count; i++)
2294                         if (context->res_ctx.pipe_ctx[i].stream == NULL ||
2295                                         context->res_ctx.pipe_ctx[i].plane_state == NULL) {
2296                                 context->res_ctx.pipe_ctx[i].pipe_idx = i;
2297                                 dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]);
2298                         }
2299
2300                 process_deferred_updates(dc);
2301
2302                 dc->hwss.optimize_bandwidth(dc, context);
2303
2304                 if (dc->hwss.update_dsc_pg)
2305                         dc->hwss.update_dsc_pg(dc, context, true);
2306         }
2307
2308         dc->optimized_required = false;
2309         dc->wm_optimized_required = false;
2310 }
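
/*
 * Pairing note (sketch, assuming the dc_update_planes_and_stream() entry
 * point declared in dc.h): dc_post_update_surfaces_to_stream() is the
 * deferred "optimize" half of a fast update, run once no flip is pending:
 *
 *	dc_update_planes_and_stream(dc, srf_updates, surf_count,
 *				    stream, stream_update);
 *	// ... later, e.g. from vblank work, once flips have completed ...
 *	dc_post_update_surfaces_to_stream(dc);	// lowers clocks, gates power
 */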
2311
2312 bool dc_set_generic_gpio_for_stereo(bool enable,
2313                 struct gpio_service *gpio_service)
2314 {
2315         enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
2316         struct gpio_pin_info pin_info;
2317         struct gpio *generic;
2318         struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
2319                            GFP_KERNEL);
2320
2321         if (!config)
2322                 return false;
2323         pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
2324
2325         if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
2326                 kfree(config);
2327                 return false;
2328         } else {
2329                 generic = dal_gpio_service_create_generic_mux(
2330                         gpio_service,
2331                         pin_info.offset,
2332                         pin_info.mask);
2333         }
2334
2335         if (!generic) {
2336                 kfree(config);
2337                 return false;
2338         }
2339
2340         gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
2341
2342         config->enable_output_from_mux = enable;
2343         config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
2344
2345         if (gpio_result == GPIO_RESULT_OK)
2346                 gpio_result = dal_mux_setup_config(generic, config);
2347
2348         if (gpio_result == GPIO_RESULT_OK) {
2349                 dal_gpio_close(generic);
2350                 dal_gpio_destroy_generic_mux(&generic);
2351                 kfree(config);
2352                 return true;
2353         } else {
2354                 dal_gpio_close(generic);
2355                 dal_gpio_destroy_generic_mux(&generic);
2356                 kfree(config);
2357                 return false;
2358         }
2359 }
2360
2361 static bool is_surface_in_context(
2362                 const struct dc_state *context,
2363                 const struct dc_plane_state *plane_state)
2364 {
2365         int j;
2366
2367         for (j = 0; j < MAX_PIPES; j++) {
2368                 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2369
2370                 if (plane_state == pipe_ctx->plane_state) {
2371                         return true;
2372                 }
2373         }
2374
2375         return false;
2376 }
2377
2378 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
2379 {
2380         union surface_update_flags *update_flags = &u->surface->update_flags;
2381         enum surface_update_type update_type = UPDATE_TYPE_FAST;
2382
2383         if (!u->plane_info)
2384                 return UPDATE_TYPE_FAST;
2385
2386         if (u->plane_info->color_space != u->surface->color_space) {
2387                 update_flags->bits.color_space_change = 1;
2388                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2389         }
2390
2391         if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
2392                 update_flags->bits.horizontal_mirror_change = 1;
2393                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2394         }
2395
2396         if (u->plane_info->rotation != u->surface->rotation) {
2397                 update_flags->bits.rotation_change = 1;
2398                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2399         }
2400
2401         if (u->plane_info->format != u->surface->format) {
2402                 update_flags->bits.pixel_format_change = 1;
2403                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2404         }
2405
2406         if (u->plane_info->stereo_format != u->surface->stereo_format) {
2407                 update_flags->bits.stereo_format_change = 1;
2408                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2409         }
2410
2411         if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
2412                 update_flags->bits.per_pixel_alpha_change = 1;
2413                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2414         }
2415
2416         if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
2417                 update_flags->bits.global_alpha_change = 1;
2418                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2419         }
2420
2421         if (u->plane_info->dcc.enable != u->surface->dcc.enable
2422                         || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
2423                         || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
2424                 /* During DCC on/off, stutter period is calculated before
2425                  * DCC has fully transitioned. This results in incorrect
2426                  * stutter period calculation. Triggering a full update will
2427                  * recalculate stutter period.
2428                  */
2429                 update_flags->bits.dcc_change = 1;
2430                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2431         }
2432
2433         if (resource_pixel_format_to_bpp(u->plane_info->format) !=
2434                         resource_pixel_format_to_bpp(u->surface->format)) {
2435                 /* a different number of bytes per element requires a full
2436                  * bandwidth and DML recalculation
2437                  */
2438                 update_flags->bits.bpp_change = 1;
2439                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2440         }
2441
2442         if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
2443                         || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
2444                 update_flags->bits.plane_size_change = 1;
2445                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2446         }
2447
2448
2449         if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2450                         sizeof(union dc_tiling_info)) != 0) {
2451                 update_flags->bits.swizzle_change = 1;
2452                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2453
2454                 /* todo: the checks below are HW dependent; we should add a hook
2455                  * to the DCE/N resource and validate there.
2456                  */
2457                 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
2458                         /* swizzled mode requires the RQ to be set up properly,
2459                          * so DML must be run to calculate the RQ settings
2460                          */
2461                         update_flags->bits.bandwidth_change = 1;
2462                         elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2463                 }
2464         }
2465
2466         /* This should be UPDATE_TYPE_FAST if nothing has changed. */
2467         return update_type;
2468 }
2469
2470 static enum surface_update_type get_scaling_info_update_type(
2471                 const struct dc *dc,
2472                 const struct dc_surface_update *u)
2473 {
2474         union surface_update_flags *update_flags = &u->surface->update_flags;
2475
2476         if (!u->scaling_info)
2477                 return UPDATE_TYPE_FAST;
2478
2479         if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2480                         || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2481                         || u->scaling_info->scaling_quality.integer_scaling !=
2482                                 u->surface->scaling_quality.integer_scaling
2483                         ) {
2484                 update_flags->bits.scaling_change = 1;
2485
2486                 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2487                         || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2488                                 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2489                                         || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2490                         /* Making dst rect smaller requires a bandwidth change */
2491                         update_flags->bits.bandwidth_change = 1;
2492         }
2493
2494         if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2495                 || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
2496
2497                 update_flags->bits.scaling_change = 1;
2498                 if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2499                                 || u->scaling_info->src_rect.height > u->surface->src_rect.height)
2500                         /* Making src rect bigger requires a clock change */
2501                         update_flags->bits.clock_change = 1;
2502         }
2503
2504         if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
2505                 (u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
2506                  u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
2507                  /* Changing clip size of a large surface may result in MPC slice count change */
2508                 update_flags->bits.bandwidth_change = 1;
2509
2510         if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width ||
2511                         u->scaling_info->clip_rect.height != u->surface->clip_rect.height)
2512                 update_flags->bits.clip_size_change = 1;
2513
2514         if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2515                         || u->scaling_info->src_rect.y != u->surface->src_rect.y
2516                         || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2517                         || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2518                         || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2519                         || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2520                 update_flags->bits.position_change = 1;
2521
2522         if (update_flags->bits.clock_change
2523                         || update_flags->bits.bandwidth_change
2524                         || update_flags->bits.scaling_change)
2525                 return UPDATE_TYPE_FULL;
2526
2527         if (update_flags->bits.position_change ||
2528                         update_flags->bits.clip_size_change)
2529                 return UPDATE_TYPE_MED;
2530
2531         return UPDATE_TYPE_FAST;
2532 }
2533
2534 static enum surface_update_type det_surface_update(const struct dc *dc,
2535                 const struct dc_surface_update *u)
2536 {
2537         const struct dc_state *context = dc->current_state;
2538         enum surface_update_type type;
2539         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2540         union surface_update_flags *update_flags = &u->surface->update_flags;
2541
2542         if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2543                 update_flags->raw = 0xFFFFFFFF;
2544                 return UPDATE_TYPE_FULL;
2545         }
2546
2547         update_flags->raw = 0; // Reset all flags
2548
2549         type = get_plane_info_update_type(u);
2550         elevate_update_type(&overall_type, type);
2551
2552         type = get_scaling_info_update_type(dc, u);
2553         elevate_update_type(&overall_type, type);
2554
2555         if (u->flip_addr) {
2556                 update_flags->bits.addr_update = 1;
2557                 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
2558                         update_flags->bits.tmz_changed = 1;
2559                         elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
2560                 }
2561         }
2562         if (u->in_transfer_func)
2563                 update_flags->bits.in_transfer_func_change = 1;
2564
2565         if (u->input_csc_color_matrix)
2566                 update_flags->bits.input_csc_change = 1;
2567
2568         if (u->coeff_reduction_factor)
2569                 update_flags->bits.coeff_reduction_change = 1;
2570
2571         if (u->gamut_remap_matrix)
2572                 update_flags->bits.gamut_remap_change = 1;
2573
2574         if (u->blend_tf)
2575                 update_flags->bits.gamma_change = 1;
2576
2577         if (u->gamma) {
2578                 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2579
2580                 if (u->plane_info)
2581                         format = u->plane_info->format;
2582                 else if (u->surface)
2583                         format = u->surface->format;
2584
2585                 if (dce_use_lut(format))
2586                         update_flags->bits.gamma_change = 1;
2587         }
2588
2589         if (u->lut3d_func || u->func_shaper)
2590                 update_flags->bits.lut_3d = 1;
2591
2592         if (u->hdr_mult.value)
2593                 if (u->hdr_mult.value != u->surface->hdr_mult.value) {
2594                         update_flags->bits.hdr_mult = 1;
2595                         elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2596                 }
2597
2598         if (update_flags->bits.in_transfer_func_change) {
2599                 type = UPDATE_TYPE_MED;
2600                 elevate_update_type(&overall_type, type);
2601         }
2602
2603         if (update_flags->bits.lut_3d) {
2604                 type = UPDATE_TYPE_FULL;
2605                 elevate_update_type(&overall_type, type);
2606         }
2607
2608         if (dc->debug.enable_legacy_fast_update &&
2609                         (update_flags->bits.gamma_change ||
2610                         update_flags->bits.gamut_remap_change ||
2611                         update_flags->bits.input_csc_change ||
2612                         update_flags->bits.coeff_reduction_change)) {
2613                 type = UPDATE_TYPE_FULL;
2614                 elevate_update_type(&overall_type, type);
2615         }
2616         return overall_type;
2617 }
2618
2619 static enum surface_update_type check_update_surfaces_for_stream(
2620                 struct dc *dc,
2621                 struct dc_surface_update *updates,
2622                 int surface_count,
2623                 struct dc_stream_update *stream_update,
2624                 const struct dc_stream_status *stream_status)
2625 {
2626         int i;
2627         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2628
2629         if (dc->idle_optimizations_allowed)
2630                 overall_type = UPDATE_TYPE_FULL;
2631
2632         if (stream_status == NULL || stream_status->plane_count != surface_count)
2633                 overall_type = UPDATE_TYPE_FULL;
2634
2635         if (stream_update && stream_update->pending_test_pattern) {
2636                 overall_type = UPDATE_TYPE_FULL;
2637         }
2638
2639         /* some stream updates require passive update */
2640         if (stream_update) {
2641                 union stream_update_flags *su_flags = &stream_update->stream->update_flags;
2642
2643                 if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
2644                         (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
2645                         stream_update->integer_scaling_update)
2646                         su_flags->bits.scaling = 1;
2647
2648                 if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
2649                         su_flags->bits.out_tf = 1;
2650
2651                 if (stream_update->abm_level)
2652                         su_flags->bits.abm_level = 1;
2653
2654                 if (stream_update->dpms_off)
2655                         su_flags->bits.dpms_off = 1;
2656
2657                 if (stream_update->gamut_remap)
2658                         su_flags->bits.gamut_remap = 1;
2659
2660                 if (stream_update->wb_update)
2661                         su_flags->bits.wb_update = 1;
2662
2663                 if (stream_update->dsc_config)
2664                         su_flags->bits.dsc_changed = 1;
2665
2666                 if (stream_update->mst_bw_update)
2667                         su_flags->bits.mst_bw = 1;
2668
2669                 if (stream_update->stream && stream_update->stream->freesync_on_desktop &&
2670                         (stream_update->vrr_infopacket || stream_update->allow_freesync ||
2671                                 stream_update->vrr_active_variable || stream_update->vrr_active_fixed))
2672                         su_flags->bits.fams_changed = 1;
2673
2674                 if (su_flags->raw != 0)
2675                         overall_type = UPDATE_TYPE_FULL;
2676
2677                 if (stream_update->output_csc_transform || stream_update->output_color_space)
2678                         su_flags->bits.out_csc = 1;
2679
2680                 /* Output transfer function changes do not require bandwidth recalculation,
2681                  * so don't trigger a full update
2682                  */
2683                 if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
2684                         su_flags->bits.out_tf = 1;
2685         }
2686
2687         for (i = 0 ; i < surface_count; i++) {
2688                 enum surface_update_type type =
2689                                 det_surface_update(dc, &updates[i]);
2690
2691                 elevate_update_type(&overall_type, type);
2692         }
2693
2694         return overall_type;
2695 }
2696
2697 /*
2698  * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
2699  *
2700  * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
2701  */
2702 enum surface_update_type dc_check_update_surfaces_for_stream(
2703                 struct dc *dc,
2704                 struct dc_surface_update *updates,
2705                 int surface_count,
2706                 struct dc_stream_update *stream_update,
2707                 const struct dc_stream_status *stream_status)
2708 {
2709         int i;
2710         enum surface_update_type type;
2711
2712         if (stream_update)
2713                 stream_update->stream->update_flags.raw = 0;
2714         for (i = 0; i < surface_count; i++)
2715                 updates[i].surface->update_flags.raw = 0;
2716
2717         type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2718         if (type == UPDATE_TYPE_FULL) {
2719                 if (stream_update) {
2720                         uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2721                         stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2722                         stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2723                 }
2724                 for (i = 0; i < surface_count; i++)
2725                         updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2726         }
2727
2728         if (type == UPDATE_TYPE_FAST) {
2729                 // If there's an available clock comparator, we use that.
2730                 if (dc->clk_mgr->funcs->are_clock_states_equal) {
2731                         if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2732                                 dc->optimized_required = true;
2733                 // Otherwise, fall back to a straight memory compare of the clock states.
2734                 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2735                         dc->optimized_required = true;
2736                 }
2737
2738                 dc->optimized_required |= dc->wm_optimized_required;
2739         }
2740
2741         return type;
2742 }
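
/*
 * Example (hypothetical caller sketch, not part of this file): a DM layer
 * deciding whether a commit can stay on the fast path would classify the
 * updates first; dc, srf_updates, surface_count, stream_update and
 * stream_status are assumed to be provided by the caller.
 *
 *	enum surface_update_type type;
 *
 *	type = dc_check_update_surfaces_for_stream(dc, srf_updates,
 *			surface_count, stream_update, stream_status);
 *	if (type == UPDATE_TYPE_FAST) {
 *		// flip-only change, no bandwidth revalidation needed
 *	} else {
 *		// UPDATE_TYPE_MED/UPDATE_TYPE_FULL: a new dc_state must
 *		// be built and validated before programming
 *	}
 */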
2743
2744 static struct dc_stream_status *stream_get_status(
2745         struct dc_state *ctx,
2746         struct dc_stream_state *stream)
2747 {
2748         uint8_t i;
2749
2750         for (i = 0; i < ctx->stream_count; i++) {
2751                 if (stream == ctx->streams[i]) {
2752                         return &ctx->stream_status[i];
2753                 }
2754         }
2755
2756         return NULL;
2757 }
2758
2759 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2760
2761 static void copy_surface_update_to_plane(
2762                 struct dc_plane_state *surface,
2763                 struct dc_surface_update *srf_update)
2764 {
2765         if (srf_update->flip_addr) {
2766                 surface->address = srf_update->flip_addr->address;
2767                 surface->flip_immediate =
2768                         srf_update->flip_addr->flip_immediate;
2769                 surface->time.time_elapsed_in_us[surface->time.index] =
2770                         srf_update->flip_addr->flip_timestamp_in_us -
2771                                 surface->time.prev_update_time_in_us;
2772                 surface->time.prev_update_time_in_us =
2773                         srf_update->flip_addr->flip_timestamp_in_us;
2774                 surface->time.index++;
2775                 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2776                         surface->time.index = 0;
2777
2778                 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2779         }
2780
2781         if (srf_update->scaling_info) {
2782                 surface->scaling_quality =
2783                                 srf_update->scaling_info->scaling_quality;
2784                 surface->dst_rect =
2785                                 srf_update->scaling_info->dst_rect;
2786                 surface->src_rect =
2787                                 srf_update->scaling_info->src_rect;
2788                 surface->clip_rect =
2789                                 srf_update->scaling_info->clip_rect;
2790         }
2791
2792         if (srf_update->plane_info) {
2793                 surface->color_space =
2794                                 srf_update->plane_info->color_space;
2795                 surface->format =
2796                                 srf_update->plane_info->format;
2797                 surface->plane_size =
2798                                 srf_update->plane_info->plane_size;
2799                 surface->rotation =
2800                                 srf_update->plane_info->rotation;
2801                 surface->horizontal_mirror =
2802                                 srf_update->plane_info->horizontal_mirror;
2803                 surface->stereo_format =
2804                                 srf_update->plane_info->stereo_format;
2805                 surface->tiling_info =
2806                                 srf_update->plane_info->tiling_info;
2807                 surface->visible =
2808                                 srf_update->plane_info->visible;
2809                 surface->per_pixel_alpha =
2810                                 srf_update->plane_info->per_pixel_alpha;
2811                 surface->global_alpha =
2812                                 srf_update->plane_info->global_alpha;
2813                 surface->global_alpha_value =
2814                                 srf_update->plane_info->global_alpha_value;
2815                 surface->dcc =
2816                                 srf_update->plane_info->dcc;
2817                 surface->layer_index =
2818                                 srf_update->plane_info->layer_index;
2819         }
2820
2821         if (srf_update->gamma &&
2822                         (surface->gamma_correction !=
2823                                         srf_update->gamma)) {
2824                 memcpy(&surface->gamma_correction->entries,
2825                         &srf_update->gamma->entries,
2826                         sizeof(struct dc_gamma_entries));
2827                 surface->gamma_correction->is_identity =
2828                         srf_update->gamma->is_identity;
2829                 surface->gamma_correction->num_entries =
2830                         srf_update->gamma->num_entries;
2831                 surface->gamma_correction->type =
2832                         srf_update->gamma->type;
2833         }
2834
2835         if (srf_update->in_transfer_func &&
2836                         (surface->in_transfer_func !=
2837                                 srf_update->in_transfer_func)) {
2838                 surface->in_transfer_func->sdr_ref_white_level =
2839                         srf_update->in_transfer_func->sdr_ref_white_level;
2840                 surface->in_transfer_func->tf =
2841                         srf_update->in_transfer_func->tf;
2842                 surface->in_transfer_func->type =
2843                         srf_update->in_transfer_func->type;
2844                 memcpy(&surface->in_transfer_func->tf_pts,
2845                         &srf_update->in_transfer_func->tf_pts,
2846                         sizeof(struct dc_transfer_func_distributed_points));
2847         }
2848
2849         if (srf_update->func_shaper &&
2850                         (surface->in_shaper_func !=
2851                         srf_update->func_shaper))
2852                 memcpy(surface->in_shaper_func, srf_update->func_shaper,
2853                 sizeof(*surface->in_shaper_func));
2854
2855         if (srf_update->lut3d_func &&
2856                         (surface->lut3d_func !=
2857                         srf_update->lut3d_func))
2858                 memcpy(surface->lut3d_func, srf_update->lut3d_func,
2859                 sizeof(*surface->lut3d_func));
2860
2861         if (srf_update->hdr_mult.value)
2862                 surface->hdr_mult =
2863                                 srf_update->hdr_mult;
2864
2865         if (srf_update->blend_tf &&
2866                         (surface->blend_tf !=
2867                         srf_update->blend_tf))
2868                 memcpy(surface->blend_tf, srf_update->blend_tf,
2869                 sizeof(*surface->blend_tf));
2870
2871         if (srf_update->input_csc_color_matrix)
2872                 surface->input_csc_color_matrix =
2873                         *srf_update->input_csc_color_matrix;
2874
2875         if (srf_update->coeff_reduction_factor)
2876                 surface->coeff_reduction_factor =
2877                         *srf_update->coeff_reduction_factor;
2878
2879         if (srf_update->gamut_remap_matrix)
2880                 surface->gamut_remap_matrix =
2881                         *srf_update->gamut_remap_matrix;
2882 }
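
/*
 * Example (hypothetical sketch, not part of this file): a flip-only update
 * typically populates just the surface and flip_addr members, so that
 * copy_surface_update_to_plane() above only touches the address and flip
 * metadata; "plane_state" and "new_address" are assumed caller-provided.
 *
 *	struct dc_flip_addrs flip_addr = {0};
 *	struct dc_surface_update update = {0};
 *
 *	flip_addr.address = new_address;
 *	flip_addr.flip_immediate = false;
 *	update.surface = plane_state;
 *	update.flip_addr = &flip_addr;
 */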
2883
2884 static void copy_stream_update_to_stream(struct dc *dc,
2885                                          struct dc_state *context,
2886                                          struct dc_stream_state *stream,
2887                                          struct dc_stream_update *update)
2888 {
2889         struct dc_context *dc_ctx = dc->ctx;
2890
2891         if (update == NULL || stream == NULL)
2892                 return;
2893
2894         if (update->src.height && update->src.width)
2895                 stream->src = update->src;
2896
2897         if (update->dst.height && update->dst.width)
2898                 stream->dst = update->dst;
2899
2900         if (update->out_transfer_func &&
2901             stream->out_transfer_func != update->out_transfer_func) {
2902                 stream->out_transfer_func->sdr_ref_white_level =
2903                         update->out_transfer_func->sdr_ref_white_level;
2904                 stream->out_transfer_func->tf = update->out_transfer_func->tf;
2905                 stream->out_transfer_func->type =
2906                         update->out_transfer_func->type;
2907                 memcpy(&stream->out_transfer_func->tf_pts,
2908                        &update->out_transfer_func->tf_pts,
2909                        sizeof(struct dc_transfer_func_distributed_points));
2910         }
2911
2912         if (update->hdr_static_metadata)
2913                 stream->hdr_static_metadata = *update->hdr_static_metadata;
2914
2915         if (update->abm_level)
2916                 stream->abm_level = *update->abm_level;
2917
2918         if (update->periodic_interrupt)
2919                 stream->periodic_interrupt = *update->periodic_interrupt;
2920
2921         if (update->gamut_remap)
2922                 stream->gamut_remap_matrix = *update->gamut_remap;
2923
2924         /* Note: updating this after mode set is currently not a use case;
2925          * however, if it arises, the OCSC would need to be reprogrammed at
2926          * a minimum.
2927          */
2928         if (update->output_color_space)
2929                 stream->output_color_space = *update->output_color_space;
2930
2931         if (update->output_csc_transform)
2932                 stream->csc_color_matrix = *update->output_csc_transform;
2933
2934         if (update->vrr_infopacket)
2935                 stream->vrr_infopacket = *update->vrr_infopacket;
2936
2937         if (update->allow_freesync)
2938                 stream->allow_freesync = *update->allow_freesync;
2939
2940         if (update->vrr_active_variable)
2941                 stream->vrr_active_variable = *update->vrr_active_variable;
2942
2943         if (update->vrr_active_fixed)
2944                 stream->vrr_active_fixed = *update->vrr_active_fixed;
2945
2946         if (update->crtc_timing_adjust)
2947                 stream->adjust = *update->crtc_timing_adjust;
2948
2949         if (update->dpms_off)
2950                 stream->dpms_off = *update->dpms_off;
2951
2952         if (update->hfvsif_infopacket)
2953                 stream->hfvsif_infopacket = *update->hfvsif_infopacket;
2954
2955         if (update->vtem_infopacket)
2956                 stream->vtem_infopacket = *update->vtem_infopacket;
2957
2958         if (update->vsc_infopacket)
2959                 stream->vsc_infopacket = *update->vsc_infopacket;
2960
2961         if (update->vsp_infopacket)
2962                 stream->vsp_infopacket = *update->vsp_infopacket;
2963
2964         if (update->adaptive_sync_infopacket)
2965                 stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
2966
2967         if (update->dither_option)
2968                 stream->dither_option = *update->dither_option;
2969
2970         if (update->pending_test_pattern)
2971                 stream->test_pattern = *update->pending_test_pattern;
2972         /* update current stream with writeback info */
2973         if (update->wb_update) {
2974                 int i;
2975
2976                 stream->num_wb_info = update->wb_update->num_wb_info;
2977                 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2978                 for (i = 0; i < stream->num_wb_info; i++)
2979                         stream->writeback_info[i] =
2980                                 update->wb_update->writeback_info[i];
2981         }
2982         if (update->dsc_config) {
2983                 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2984                 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2985                 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2986                                        update->dsc_config->num_slices_v != 0);
2987
2988                 /* Use a temporary context for validating the new DSC config */
2989                 struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state);
2990
2991                 if (dsc_validate_context) {
2992                         stream->timing.dsc_cfg = *update->dsc_config;
2993                         stream->timing.flags.DSC = enable_dsc;
2994                         if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
2995                                 stream->timing.dsc_cfg = old_dsc_cfg;
2996                                 stream->timing.flags.DSC = old_dsc_enabled;
2997                                 update->dsc_config = NULL;
2998                         }
2999
3000                         dc_state_release(dsc_validate_context);
3001                 } else {
3002                         DC_ERROR("Failed to allocate new validate context for DSC change\n");
3003                         update->dsc_config = NULL;
3004                 }
3005         }
3006 }
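
/*
 * Example (hypothetical sketch): stream updates pass optional fields by
 * pointer, so a DPMS-off request only needs one member filled in besides
 * the stream itself (the update-type check above records its flags in
 * stream->update_flags via stream_update->stream).
 *
 *	bool dpms_off = true;
 *	struct dc_stream_update stream_update = {0};
 *
 *	stream_update.stream = stream;
 *	stream_update.dpms_off = &dpms_off;
 */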
3007
3008 static void backup_planes_and_stream_state(
3009                 struct dc_scratch_space *scratch,
3010                 struct dc_stream_state *stream)
3011 {
3012         int i;
3013         struct dc_stream_status *status = dc_stream_get_status(stream);
3014
3015         if (!status)
3016                 return;
3017
3018         for (i = 0; i < status->plane_count; i++) {
3019                 scratch->plane_states[i] = *status->plane_states[i];
3020                 scratch->gamma_correction[i] = *status->plane_states[i]->gamma_correction;
3021                 scratch->in_transfer_func[i] = *status->plane_states[i]->in_transfer_func;
3022                 scratch->lut3d_func[i] = *status->plane_states[i]->lut3d_func;
3023                 scratch->in_shaper_func[i] = *status->plane_states[i]->in_shaper_func;
3024                 scratch->blend_tf[i] = *status->plane_states[i]->blend_tf;
3025         }
3026         scratch->stream_state = *stream;
3027         if (stream->out_transfer_func)
3028                 scratch->out_transfer_func = *stream->out_transfer_func;
3029 }
3030
3031 static void restore_planes_and_stream_state(
3032                 struct dc_scratch_space *scratch,
3033                 struct dc_stream_state *stream)
3034 {
3035         int i;
3036         struct dc_stream_status *status = dc_stream_get_status(stream);
3037
3038         if (!status)
3039                 return;
3040
3041         for (i = 0; i < status->plane_count; i++) {
3042                 *status->plane_states[i] = scratch->plane_states[i];
3043                 *status->plane_states[i]->gamma_correction = scratch->gamma_correction[i];
3044                 *status->plane_states[i]->in_transfer_func = scratch->in_transfer_func[i];
3045                 *status->plane_states[i]->lut3d_func = scratch->lut3d_func[i];
3046                 *status->plane_states[i]->in_shaper_func = scratch->in_shaper_func[i];
3047                 *status->plane_states[i]->blend_tf = scratch->blend_tf[i];
3048         }
3049         *stream = scratch->stream_state;
3050         if (stream->out_transfer_func)
3051                 *stream->out_transfer_func = scratch->out_transfer_func;
3052 }
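
/*
 * The two helpers above are meant to be used as a matched pair around a
 * trial state change, e.g. (hypothetical sketch, "commit_failed" is an
 * assumed caller-side condition):
 *
 *	backup_planes_and_stream_state(&context->scratch, stream);
 *	// ...apply surface/stream updates and attempt to commit...
 *	if (commit_failed)
 *		restore_planes_and_stream_state(&context->scratch, stream);
 *
 * Both helpers copy the gamma/transfer-function payloads by value, not by
 * pointer, so a restore also rolls back color state.
 */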
3053
3054 static bool update_planes_and_stream_state(struct dc *dc,
3055                 struct dc_surface_update *srf_updates, int surface_count,
3056                 struct dc_stream_state *stream,
3057                 struct dc_stream_update *stream_update,
3058                 enum surface_update_type *new_update_type,
3059                 struct dc_state **new_context)
3060 {
3061         struct dc_state *context;
3062         int i, j;
3063         enum surface_update_type update_type;
3064         const struct dc_stream_status *stream_status;
3065         struct dc_context *dc_ctx = dc->ctx;
3066
3067         stream_status = dc_stream_get_status(stream);
3068
3069         if (!stream_status) {
3070                 if (surface_count) /* Only an error condition if surface_count is non-zero */
3071                         ASSERT(false);
3072
3073                 return false; /* Cannot commit surface to stream that is not committed */
3074         }
3075
3076         context = dc->current_state;
3077         backup_planes_and_stream_state(&dc->current_state->scratch, stream);
3078         update_type = dc_check_update_surfaces_for_stream(
3079                         dc, srf_updates, surface_count, stream_update, stream_status);
3080
3081         /* update current stream with the new updates */
3082         copy_stream_update_to_stream(dc, context, stream, stream_update);
3083
3084         /* do not perform surface update if surface has invalid dimensions
3085          * (all zero) and no scaling_info is provided
3086          */
3087         if (surface_count > 0) {
3088                 for (i = 0; i < surface_count; i++) {
3089                         if ((srf_updates[i].surface->src_rect.width == 0 ||
3090                                  srf_updates[i].surface->src_rect.height == 0 ||
3091                                  srf_updates[i].surface->dst_rect.width == 0 ||
3092                                  srf_updates[i].surface->dst_rect.height == 0) &&
3093                                 (!srf_updates[i].scaling_info ||
3094                                   srf_updates[i].scaling_info->src_rect.width == 0 ||
3095                                   srf_updates[i].scaling_info->src_rect.height == 0 ||
3096                                   srf_updates[i].scaling_info->dst_rect.width == 0 ||
3097                                   srf_updates[i].scaling_info->dst_rect.height == 0)) {
3098                                 DC_ERROR("Invalid src/dst rects in surface update!\n");
3099                                 return false;
3100                         }
3101                 }
3102         }
3103
3104         if (update_type >= update_surface_trace_level)
3105                 update_surface_trace(dc, srf_updates, surface_count);
3106
3107         for (i = 0; i < surface_count; i++)
3108                 copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]);
3109
3110         if (update_type >= UPDATE_TYPE_FULL) {
3111                 struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
3112
3113                 for (i = 0; i < surface_count; i++)
3114                         new_planes[i] = srf_updates[i].surface;
3115
3116                 /* initialize scratch memory for building context */
3117                 context = dc_state_create_copy(dc->current_state);
3118                 if (context == NULL) {
3119                         DC_ERROR("Failed to allocate new validate context!\n");
3120                         return false;
3121                 }
3122
3123                 /* For each full update, remove all existing phantom pipes first.
3124                  * This ensures that we have enough pipes for newly added MPO planes.
3125                  */
3126                 dc_state_remove_phantom_streams_and_planes(dc, context);
3127                 dc_state_release_phantom_streams_and_planes(dc, context);
3128
3129                 /* remove the old surfaces from the context */
3130                 if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) {
3131
3132                         BREAK_TO_DEBUGGER();
3133                         goto fail;
3134                 }
3135
3136                 /* add the new surfaces to the context */
3137                 if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
3138
3139                         BREAK_TO_DEBUGGER();
3140                         goto fail;
3141                 }
3142         }
3143
3144         /* save update parameters into surface */
3145         for (i = 0; i < surface_count; i++) {
3146                 struct dc_plane_state *surface = srf_updates[i].surface;
3147
3148                 if (update_type >= UPDATE_TYPE_MED) {
3149                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3150                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3151
3152                                 if (pipe_ctx->plane_state != surface)
3153                                         continue;
3154
3155                                 resource_build_scaling_params(pipe_ctx);
3156                         }
3157                 }
3158         }
3159
3160         if (update_type == UPDATE_TYPE_FULL) {
3161                 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3162                         BREAK_TO_DEBUGGER();
3163                         goto fail;
3164                 }
3165
3166                 for (i = 0; i < context->stream_count; i++) {
3167                         struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx,
3168                                         context->streams[i]);
3169
3170                         if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
3171                                 resource_build_test_pattern_params(&context->res_ctx, otg_master);
3172                 }
3173         }
3174
3175         *new_context = context;
3176         *new_update_type = update_type;
3177         backup_planes_and_stream_state(&context->scratch, stream);
3178
3179         return true;
3180
3181 fail:
3182         dc_state_release(context);
3183
3184         return false;
3185
3186 }
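
/*
 * Typical use of the helper above (hypothetical sketch): on success the
 * caller receives the classified update type and a context to program.
 * For fast and medium updates this is dc->current_state; for full updates
 * it is a newly validated copy that the caller must commit and release.
 *
 *	enum surface_update_type update_type;
 *	struct dc_state *context;
 *
 *	if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
 *			stream, stream_update, &update_type, &context))
 *		return false;	// invalid rects or validation failure
 */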
3187
3188 static void commit_planes_do_stream_update(struct dc *dc,
3189                 struct dc_stream_state *stream,
3190                 struct dc_stream_update *stream_update,
3191                 enum surface_update_type update_type,
3192                 struct dc_state *context)
3193 {
3194         int j;
3195
3196         // Stream updates
3197         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3198                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3199
3200                 if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) {
3201
3202                         if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
3203                                 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
3204
3205                         if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
3206                                         stream_update->vrr_infopacket ||
3207                                         stream_update->vsc_infopacket ||
3208                                         stream_update->vsp_infopacket ||
3209                                         stream_update->hfvsif_infopacket ||
3210                                         stream_update->adaptive_sync_infopacket ||
3211                                         stream_update->vtem_infopacket) {
3212                                 resource_build_info_frame(pipe_ctx);
3213                                 dc->hwss.update_info_frame(pipe_ctx);
3214
3215                                 if (dc_is_dp_signal(pipe_ctx->stream->signal))
3216                                         dc->link_srv->dp_trace_source_sequence(
3217                                                         pipe_ctx->stream->link,
3218                                                         DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
3219                         }
3220
3221                         if (stream_update->hdr_static_metadata &&
3222                                         stream->use_dynamic_meta &&
3223                                         dc->hwss.set_dmdata_attributes &&
3224                                         pipe_ctx->stream->dmdata_address.quad_part != 0)
3225                                 dc->hwss.set_dmdata_attributes(pipe_ctx);
3226
3227                         if (stream_update->gamut_remap)
3228                                 dc_stream_set_gamut_remap(dc, stream);
3229
3230                         if (stream_update->output_csc_transform)
3231                                 dc_stream_program_csc_matrix(dc, stream);
3232
3233                         if (stream_update->dither_option) {
3234                                 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
3235                                 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
3236                                                                         &pipe_ctx->stream->bit_depth_params);
3237                                 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
3238                                                 &stream->bit_depth_params,
3239                                                 &stream->clamping);
3240                                 while (odm_pipe) {
3241                                         odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
3242                                                         &stream->bit_depth_params,
3243                                                         &stream->clamping);
3244                                         odm_pipe = odm_pipe->next_odm_pipe;
3245                                 }
3246                         }
3247
3248
3249                         /* Full FE update */
3250                         if (update_type == UPDATE_TYPE_FAST)
3251                                 continue;
3252
3253                         if (stream_update->dsc_config)
3254                                 dc->link_srv->update_dsc_config(pipe_ctx);
3255
3256                         if (stream_update->mst_bw_update) {
3257                                 if (stream_update->mst_bw_update->is_increase)
3258                                         dc->link_srv->increase_mst_payload(pipe_ctx,
3259                                                         stream_update->mst_bw_update->mst_stream_bw);
3260                                 else
3261                                         dc->link_srv->reduce_mst_payload(pipe_ctx,
3262                                                         stream_update->mst_bw_update->mst_stream_bw);
3263                         }
3264
3265                         if (stream_update->pending_test_pattern) {
3266                                 dc_link_dp_set_test_pattern(stream->link,
3267                                         stream->test_pattern.type,
3268                                         stream->test_pattern.color_space,
3269                                         stream->test_pattern.p_link_settings,
3270                                         stream->test_pattern.p_custom_pattern,
3271                                         stream->test_pattern.cust_pattern_size);
3272                         }
3273
3274                         if (stream_update->dpms_off) {
3275                                 if (*stream_update->dpms_off) {
3276                                         dc->link_srv->set_dpms_off(pipe_ctx);
3277                                         /* for dpms, keep acquired resources*/
3278                                         if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
3279                                                 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
3280
3281                                         dc->optimized_required = true;
3282
3283                                 } else {
3284                                         if (get_seamless_boot_stream_count(context) == 0)
3285                                                 dc->hwss.prepare_bandwidth(dc, dc->current_state);
3286                                         dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
3287                                 }
3288                         } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space
3289                                         && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) {
3290                                 /*
3291                                  * Workaround for firmware issue in some receivers where they don't pick up
3292                                  * correct output color space unless DP link is disabled/re-enabled
3293                                  */
3294                                 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
3295                         }
3296
3297                         if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
3298                                 bool should_program_abm = true;
3299
3300                                 // if otg funcs defined check if blanked before programming
3301                                 if (pipe_ctx->stream_res.tg->funcs->is_blanked)
3302                                         if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
3303                                                 should_program_abm = false;
3304
3305                                 if (should_program_abm) {
3306                                         if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
3307                                                 dc->hwss.set_abm_immediate_disable(pipe_ctx);
3308                                         } else {
3309                                                 pipe_ctx->stream_res.abm->funcs->set_abm_level(
3310                                                         pipe_ctx->stream_res.abm, stream->abm_level);
3311                                         }
3312                                 }
3313                         }
3314                 }
3315         }
3316 }
3317
3318 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
3319 {
3320         if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
3321                         || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
3322                         && stream->ctx->dce_version >= DCN_VERSION_3_1)
3323                 return true;
3324
3325         if (stream->link->replay_settings.config.replay_supported)
3326                 return true;
3327
3328         if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
3329                 return true;
3330
3331         return false;
3332 }
3333
3334 void dc_dmub_update_dirty_rect(struct dc *dc,
3335                                int surface_count,
3336                                struct dc_stream_state *stream,
3337                                struct dc_surface_update *srf_updates,
3338                                struct dc_state *context)
3339 {
3340         union dmub_rb_cmd cmd;
3341         struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3342         unsigned int i, j;
3343         unsigned int panel_inst = 0;
3344
3345         if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3346                 return;
3347
3348         if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3349                 return;
3350
3351         memset(&cmd, 0x0, sizeof(cmd));
3352         cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3353         cmd.update_dirty_rect.header.sub_type = 0;
3354         cmd.update_dirty_rect.header.payload_bytes =
3355                 sizeof(cmd.update_dirty_rect) -
3356                 sizeof(cmd.update_dirty_rect.header);
3357         update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3358         for (i = 0; i < surface_count; i++) {
3359                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3360                 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3361
3362                 if (!srf_updates[i].surface || !flip_addr)
3363                         continue;
3364                 /* Do not send in immediate flip mode */
3365                 if (srf_updates[i].surface->flip_immediate)
3366                         continue;
3367
3368                 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3369                 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3370                                 sizeof(flip_addr->dirty_rects));
3371                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3372                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3373
3374                         if (pipe_ctx->stream != stream)
3375                                 continue;
3376                         if (pipe_ctx->plane_state != plane_state)
3377                                 continue;
3378
3379                         update_dirty_rect->panel_inst = panel_inst;
3380                         update_dirty_rect->pipe_idx = j;
3381                         dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
3382                 }
3383         }
3384 }
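
/*
 * Example (hypothetical sketch): callers opt into the dirty-rect
 * optimization by attaching per-flip damage to the flip address structure;
 * immediate flips are skipped above. "damage_rect" is an assumed
 * caller-provided struct rect.
 *
 *	flip_addr.dirty_rect_count = 1;
 *	flip_addr.dirty_rects[0] = damage_rect;
 */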
3385
3386 static void build_dmub_update_dirty_rect(
3387                 struct dc *dc,
3388                 int surface_count,
3389                 struct dc_stream_state *stream,
3390                 struct dc_surface_update *srf_updates,
3391                 struct dc_state *context,
3392                 struct dc_dmub_cmd dc_dmub_cmd[],
3393                 unsigned int *dmub_cmd_count)
3394 {
3395         union dmub_rb_cmd cmd;
3396         struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3397         unsigned int i, j;
3398         unsigned int panel_inst = 0;
3399
3400         if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3401                 return;
3402
3403         if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3404                 return;
3405
3406         memset(&cmd, 0x0, sizeof(cmd));
3407         cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3408         cmd.update_dirty_rect.header.sub_type = 0;
3409         cmd.update_dirty_rect.header.payload_bytes =
3410                 sizeof(cmd.update_dirty_rect) -
3411                 sizeof(cmd.update_dirty_rect.header);
3412         update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3413         for (i = 0; i < surface_count; i++) {
3414                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3415                 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3416
3417                 if (!srf_updates[i].surface || !flip_addr)
3418                         continue;
3419                 /* Do not send in immediate flip mode */
3420                 if (srf_updates[i].surface->flip_immediate)
3421                         continue;
3422                 update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
3423                 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3424                 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3425                                 sizeof(flip_addr->dirty_rects));
3426                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3427                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3428
3429                         if (pipe_ctx->stream != stream)
3430                                 continue;
3431                         if (pipe_ctx->plane_state != plane_state)
3432                                 continue;
3433                         update_dirty_rect->panel_inst = panel_inst;
3434                         update_dirty_rect->pipe_idx = j;
3435                         dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd;
3436                         dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
3437                         (*dmub_cmd_count)++;
3438                 }
3439         }
3440 }
3441
3442
3443 /**
3444  * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB
3445  *
3446  * @dc: Current DC state
3447  * @srf_updates: Array of surface updates
3448  * @surface_count: Number of surfaces that have an update
3449  * @stream: Corresponding stream to be updated in the current flip
3450  * @context: New DC state to be programmed
3451  *
3452  * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB
3453  * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array
3454  *
3455  * This function builds an array of DMCUB commands to be sent to DMCUB. Building the
3456  * commands up front is required so that they can all be sent while the OTG lock is acquired.
3457  *
3458  * Return: void
3459  */
3460 static void build_dmub_cmd_list(struct dc *dc,
3461                 struct dc_surface_update *srf_updates,
3462                 int surface_count,
3463                 struct dc_stream_state *stream,
3464                 struct dc_state *context,
3465                 struct dc_dmub_cmd dc_dmub_cmd[],
3466                 unsigned int *dmub_cmd_count)
3467 {
3468         // Initialize cmd count to 0
3469         *dmub_cmd_count = 0;
3470         build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count);
3471 }
3472
3473 static void commit_planes_for_stream_fast(struct dc *dc,
3474                 struct dc_surface_update *srf_updates,
3475                 int surface_count,
3476                 struct dc_stream_state *stream,
3477                 struct dc_stream_update *stream_update,
3478                 enum surface_update_type update_type,
3479                 struct dc_state *context)
3480 {
3481         int i, j;
3482         struct pipe_ctx *top_pipe_to_program = NULL;
3483         struct dc_stream_status *stream_status = NULL;
3484         dc_exit_ips_for_hw_access(dc);
3485
3486         dc_z10_restore(dc);
3487
3488         top_pipe_to_program = resource_get_otg_master_for_stream(
3489                         &context->res_ctx,
3490                         stream);
3491
3492         if (!top_pipe_to_program)
3493                 return;
3494
3495         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3496                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3497
3498                 if (pipe->stream && pipe->plane_state) {
3499                         set_p_state_switch_method(dc, context, pipe);
3500
3501                         if (dc->debug.visual_confirm)
3502                                 dc_update_visual_confirm_color(dc, context, pipe);
3503                 }
3504         }
3505
3506         for (i = 0; i < surface_count; i++) {
3507                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3508                 /* set logical flag for lock/unlock use */
3509                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3510                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3511
3512                         if (!pipe_ctx->plane_state)
3513                                 continue;
3514                         if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3515                                 continue;
3516                         pipe_ctx->plane_state->triplebuffer_flips = false;
3517                         if (update_type == UPDATE_TYPE_FAST &&
3518                             dc->hwss.program_triplebuffer &&
3519                             !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3520                                 /* triple buffer for VUpdate only */
3521                                 pipe_ctx->plane_state->triplebuffer_flips = true;
3522                         }
3523                 }
3524         }
3525
3526         stream_status = dc_state_get_stream_status(context, stream);
3527
3528         build_dmub_cmd_list(dc,
3529                         srf_updates,
3530                         surface_count,
3531                         stream,
3532                         context,
3533                         context->dc_dmub_cmd,
3534                         &(context->dmub_cmd_count));
3535         hwss_build_fast_sequence(dc,
3536                         context->dc_dmub_cmd,
3537                         context->dmub_cmd_count,
3538                         context->block_sequence,
3539                         &(context->block_sequence_steps),
3540                         top_pipe_to_program,
3541                         stream_status);
3542         hwss_execute_sequence(dc,
3543                         context->block_sequence,
3544                         context->block_sequence_steps);
3545         /* Clear update flags so next flip doesn't have redundant programming
3546          * (if there's no stream update, the update flags are not cleared).
3547          * Surface updates are cleared unconditionally at the beginning of each flip,
3548          * so no need to clear here.
3549          */
3550         if (top_pipe_to_program->stream)
3551                 top_pipe_to_program->stream->update_flags.raw = 0;
3552 }
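
/*
 * Note: the fast path above deliberately splits programming into a "build"
 * phase (build_dmub_cmd_list() and hwss_build_fast_sequence()) and an
 * "execute" phase (hwss_execute_sequence()). As with the DMCUB command
 * list, the intent is that everything sent while the OTG lock is held has
 * been prepared up front, keeping the locked section as short as possible.
 */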
3553
3554 static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
3555 {
3556 /*
3557  * This function calls HWSS to wait for any potentially double buffered
3558  * operations to complete. It should be invoked as a preamble to full
3559  * update programming, before asserting any HW locks.
3560  */
3561         int pipe_idx;
3562         int opp_inst;
3563         int opp_count = dc->res_pool->res_cap->num_opp;
3564         struct hubp *hubp;
3565         int mpcc_inst;
3566         const struct pipe_ctx *pipe_ctx;
3567
3568         for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
3569                 pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
3570
3571                 if (!pipe_ctx->stream)
3572                         continue;
3573
3574                 if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
3575                         pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
3576
3577                 hubp = pipe_ctx->plane_res.hubp;
3578                 if (!hubp)
3579                         continue;
3580
3581                 mpcc_inst = hubp->inst;
3582                 // MPCC inst is equal to pipe index in practice
3583                 for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
3584                         if ((dc->res_pool->opps[opp_inst] != NULL) &&
3585                                 (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) {
3586                                 dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
3587                                 dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
3588                                 break;
3589                         }
3590                 }
3591         }
3592         wait_for_odm_update_pending_complete(dc, dc_context);
3593 }
3594
3595 static void commit_planes_for_stream(struct dc *dc,
3596                 struct dc_surface_update *srf_updates,
3597                 int surface_count,
3598                 struct dc_stream_state *stream,
3599                 struct dc_stream_update *stream_update,
3600                 enum surface_update_type update_type,
3601                 struct dc_state *context)
3602 {
3603         int i, j;
3604         struct pipe_ctx *top_pipe_to_program = NULL;
3605         bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
3606         bool subvp_prev_use = false;
3607         bool subvp_curr_use = false;
3608         uint8_t current_stream_mask = 0;
3609
3610         // Once we apply the new subvp context to hardware it won't be in the
3611         // dc->current_state anymore, so we have to cache it before we apply
3612         // the new SubVP context
3613         subvp_prev_use = false;
3614         dc_exit_ips_for_hw_access(dc);
3615
3616         dc_z10_restore(dc);
3617         if (update_type == UPDATE_TYPE_FULL)
3618                 wait_for_outstanding_hw_updates(dc, context);
3619
3620         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3621                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3622
3623                 if (pipe->stream && pipe->plane_state) {
3624                         set_p_state_switch_method(dc, context, pipe);
3625
3626                         if (dc->debug.visual_confirm)
3627                                 dc_update_visual_confirm_color(dc, context, pipe);
3628                 }
3629         }
3630
3631         if (update_type == UPDATE_TYPE_FULL) {
3632                 dc_allow_idle_optimizations(dc, false);
3633
3634                 if (get_seamless_boot_stream_count(context) == 0)
3635                         dc->hwss.prepare_bandwidth(dc, context);
3636
3637                 if (dc->hwss.update_dsc_pg)
3638                         dc->hwss.update_dsc_pg(dc, context, false);
3639
3640                 context_clock_trace(dc, context);
3641         }
3642
3643         top_pipe_to_program = resource_get_otg_master_for_stream(
3644                                 &context->res_ctx,
3645                                 stream);
3646         ASSERT(top_pipe_to_program != NULL);
3647         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3648                 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3649
3650                 // Check old context for SubVP
3651                 subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
3652                 if (subvp_prev_use)
3653                         break;
3654         }
3655
3656         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3657                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3658
3659                 if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
3660                         subvp_curr_use = true;
3661                         break;
3662                 }
3663         }
3664
3665         if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
3666                 struct pipe_ctx *mpcc_pipe;
3667                 struct pipe_ctx *odm_pipe;
3668
3669                 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
3670                         for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
3671                                 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
3672         }
3673
3674         if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3675                 if (top_pipe_to_program &&
3676                         top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3677                         if (should_use_dmub_lock(stream->link)) {
3678                                 union dmub_hw_lock_flags hw_locks = { 0 };
3679                                 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3680
3681                                 hw_locks.bits.lock_dig = 1;
3682                                 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3683
3684                                 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3685                                                         true,
3686                                                         &hw_locks,
3687                                                         &inst_flags);
3688                         } else
3689                                 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
3690                                                 top_pipe_to_program->stream_res.tg);
3691                 }
3692
3693         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3694                 if (dc->hwss.subvp_pipe_control_lock)
3695                         dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
3696                 dc->hwss.interdependent_update_lock(dc, context, true);
3697
3698         } else {
3699                 if (dc->hwss.subvp_pipe_control_lock)
3700                         dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3701                 /* Lock the top pipe while updating plane addrs, since freesync requires
3702                  *  plane addr update event triggers to be synchronized.
3703                  *  top_pipe_to_program is expected to never be NULL
3704                  */
3705                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
3706         }
3707
3708         dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
3709
3710         // Stream updates
3711         if (stream_update)
3712                 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
3713
3714         if (surface_count == 0) {
3715                 /*
3716                  * In the case of turning off the screen, there is no need to program the
3717                  * front end a second time; just return after programming blank.
3718                  */
3719                 if (dc->hwss.apply_ctx_for_surface)
3720                         dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
3721                 if (dc->hwss.program_front_end_for_ctx)
3722                         dc->hwss.program_front_end_for_ctx(dc, context);
3723
3724                 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3725                         dc->hwss.interdependent_update_lock(dc, context, false);
3726                 } else {
3727                         dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3728                 }
3729                 dc->hwss.post_unlock_program_front_end(dc, context);
3730
3731                 if (update_type != UPDATE_TYPE_FAST)
3732                         if (dc->hwss.commit_subvp_config)
3733                                 dc->hwss.commit_subvp_config(dc, context);
3734
3735                 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3736                  * move the SubVP lock to after the phantom pipes have been set up
3737                  */
3738                 if (dc->hwss.subvp_pipe_control_lock)
3739                         dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
3740                                                          NULL, subvp_prev_use);
3741                 return;
3742         }
3743
3744         if (update_type != UPDATE_TYPE_FAST) {
3745                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3746                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3747
3748                         if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP ||
3749                                 dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) &&
3750                                 pipe_ctx->stream && pipe_ctx->plane_state) {
3751                                 /* Only update visual confirm for SUBVP and Mclk switching here.
3752                                  * The bar appears on all pipes, so we need to update it on all
3753                                  * displays so the information doesn't get stale.
3754                                  */
3755                                 dc->hwss.update_visual_confirm_color(dc, pipe_ctx,
3756                                                 pipe_ctx->plane_res.hubp->inst);
3757                         }
3758                 }
3759         }
3760
3761         for (i = 0; i < surface_count; i++) {
3762                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3763                 /* set logical flag for lock/unlock use */
3764                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3765                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3766                         if (!pipe_ctx->plane_state)
3767                                 continue;
3768                         if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3769                                 continue;
3770                         pipe_ctx->plane_state->triplebuffer_flips = false;
3771                         if (update_type == UPDATE_TYPE_FAST &&
3772                                 dc->hwss.program_triplebuffer != NULL &&
3773                                 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3774                                         /* triple buffer for VUpdate only */
3775                                         pipe_ctx->plane_state->triplebuffer_flips = true;
3776                         }
3777                 }
3778                 if (update_type == UPDATE_TYPE_FULL) {
3779                         /* force vsync flip when reconfiguring pipes to prevent underflow */
3780                         plane_state->flip_immediate = false;
3781                 }
3782         }
3783
3784         // Update Type FULL, Surface updates
3785         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3786                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3787
3788                 if (!pipe_ctx->top_pipe &&
3789                         !pipe_ctx->prev_odm_pipe &&
3790                         should_update_pipe_for_stream(context, pipe_ctx, stream)) {
3791                         struct dc_stream_status *stream_status = NULL;
3792
3793                         if (!pipe_ctx->plane_state)
3794                                 continue;
3795
3796                         /* Full FE update */
3797                         if (update_type == UPDATE_TYPE_FAST)
3798                                 continue;
3799
3800                         ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
3801
3802                         if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3803                                 /* turn off triple buffer for full update */
3804                                 dc->hwss.program_triplebuffer(
3805                                         dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3806                         }
3807                         stream_status =
3808                                 stream_get_status(context, pipe_ctx->stream);
3809
3810                         if (dc->hwss.apply_ctx_for_surface)
3811                                 dc->hwss.apply_ctx_for_surface(
3812                                         dc, pipe_ctx->stream, stream_status->plane_count, context);
3813                 }
3814         }
3815         if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
3816                 dc->hwss.program_front_end_for_ctx(dc, context);
3817                 if (dc->debug.validate_dml_output) {
3818                         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3819                                 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
3820                                 if (cur_pipe->stream == NULL)
3821                                         continue;
3822
3823                                 cur_pipe->plane_res.hubp->funcs->validate_dml_output(
3824                                                 cur_pipe->plane_res.hubp, dc->ctx,
3825                                                 &context->res_ctx.pipe_ctx[i].rq_regs,
3826                                                 &context->res_ctx.pipe_ctx[i].dlg_regs,
3827                                                 &context->res_ctx.pipe_ctx[i].ttu_regs);
3828                         }
3829                 }
3830         }
3831
3832         // Update Type FAST, Surface updates
3833         if (update_type == UPDATE_TYPE_FAST) {
3834                 if (dc->hwss.set_flip_control_gsl)
3835                         for (i = 0; i < surface_count; i++) {
3836                                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3837
3838                                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3839                                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3840
3841                                         if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3842                                                 continue;
3843
3844                                         if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3845                                                 continue;
3846
3847                                         // GSL has to be used for flip immediate
3848                                         dc->hwss.set_flip_control_gsl(pipe_ctx,
3849                                                         pipe_ctx->plane_state->flip_immediate);
3850                                 }
3851                         }
3852
3853                 /* Perform requested Updates */
3854                 for (i = 0; i < surface_count; i++) {
3855                         struct dc_plane_state *plane_state = srf_updates[i].surface;
3856
3857                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3858                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3859
3860                                 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3861                                         continue;
3862
3863                                 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3864                                         continue;
3865
3866                                 /* program triple buffer after lock based on flip type */
3867                                 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3868                                         /* only enable triple buffer for fast updates */
3869                                         dc->hwss.program_triplebuffer(
3870                                                 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3871                                 }
3872                                 if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3873                                         dc->hwss.update_plane_addr(dc, pipe_ctx);
3874                         }
3875                 }
3876         }
3877
3878         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3879                 dc->hwss.interdependent_update_lock(dc, context, false);
3880         } else {
3881                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3882         }
3883
3884         if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3885                 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3886                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3887                                 top_pipe_to_program->stream_res.tg,
3888                                 CRTC_STATE_VACTIVE);
3889                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3890                                 top_pipe_to_program->stream_res.tg,
3891                                 CRTC_STATE_VBLANK);
3892                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3893                                 top_pipe_to_program->stream_res.tg,
3894                                 CRTC_STATE_VACTIVE);
3895
3896                         if (should_use_dmub_lock(stream->link)) {
3897                                 union dmub_hw_lock_flags hw_locks = { 0 };
3898                                 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3899
3900                                 hw_locks.bits.lock_dig = 1;
3901                                 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3902
3903                                 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3904                                                         false,
3905                                                         &hw_locks,
3906                                                         &inst_flags);
3907                         } else
3908                                 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
3909                                         top_pipe_to_program->stream_res.tg);
3910                 }
3911
3912         if (subvp_curr_use) {
3913                 /* If enabling subvp or transitioning from subvp->subvp, enable the
3914                  * phantom streams before we program front end for the phantom pipes.
3915                  */
3916                 if (update_type != UPDATE_TYPE_FAST) {
3917                         if (dc->hwss.enable_phantom_streams)
3918                                 dc->hwss.enable_phantom_streams(dc, context);
3919                 }
3920         }
3921
3922         if (update_type != UPDATE_TYPE_FAST)
3923                 dc->hwss.post_unlock_program_front_end(dc, context);
3924
3925         if (subvp_prev_use && !subvp_curr_use) {
3926                 /* If disabling subvp, disable phantom streams after front end
3927                  * programming has completed (we turn on phantom OTG in order
3928                  * to complete the plane disable for phantom pipes).
3929                  */
3930
3931                 if (dc->hwss.disable_phantom_streams)
3932                         dc->hwss.disable_phantom_streams(dc, context);
3933         }
3934
3935         if (update_type != UPDATE_TYPE_FAST)
3936                 if (dc->hwss.commit_subvp_config)
3937                         dc->hwss.commit_subvp_config(dc, context);
3938         /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3939          * move the SubVP lock to after the phantom pipes have been set up
3940          */
3941         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3942                 if (dc->hwss.subvp_pipe_control_lock)
3943                         dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3944         } else {
3945                 if (dc->hwss.subvp_pipe_control_lock)
3946                         dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3947         }
3948
3949         // Fire manual trigger only when bottom plane is flipped
3950         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3951                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3952
3953                 if (!pipe_ctx->plane_state)
3954                         continue;
3955
3956                 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
3957                                 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
3958                                 !pipe_ctx->plane_state->update_flags.bits.addr_update ||
3959                                 pipe_ctx->plane_state->skip_manual_trigger)
3960                         continue;
3961
3962                 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
3963                         pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
3964         }
3965
3966         current_stream_mask = get_stream_mask(dc, context);
3967         if (current_stream_mask != context->stream_mask) {
3968                 context->stream_mask = current_stream_mask;
3969                 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, current_stream_mask);
3970         }
3971 }
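
/*
 * Illustrative summary of the post-programming sequence above (a sketch of
 * the ordering only, not compiled code):
 *
 *	program surfaces (FULL: front end per pipe, FAST: flips only)
 *	-> release pipe / interdependent update locks
 *	-> if DSC changed: wait for VACTIVE and release the double buffer lock
 *	-> enable phantom streams (SubVP) and run post-unlock front end work
 *	-> release the SubVP lock after the phantom pipes are set up
 *	-> fire the manual trigger on flipped bottom planes
 *	-> notify DMUB if the stream mask changed
 */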
3972
3973 /**
3974  * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change
3975  *
3976  * @dc: Used to get the current state status
3977  * @stream: Target stream to which the surface updates apply
3978  * @srf_updates: Array of surface updates
3979  * @surface_count: Number of surface updates
3980  * @is_plane_addition: [out] Set to true if this is a plane addition case
3981  *
3982  * DCN32x and newer support a feature named Dynamic ODM which can conflict with
3983  * MPO if both are used simultaneously in some specific configurations (e.g.,
3984  * 4k@144). This function checks whether the incoming context requires a
3985  * transition state, with pipe splitting avoided and dynamic ODM disabled, to
3986  * work around this hardware limitation. If the OPP associated with an MPCC
3987  * might change due to plane additions, this function returns true.
3988  *
3989  * Return:
3990  * True if OPP and MPCC might change, false otherwise.
3992  */
3993 static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
3994                 struct dc_stream_state *stream,
3995                 struct dc_surface_update *srf_updates,
3996                 int surface_count,
3997                 bool *is_plane_addition)
3998 {
4000         struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
4001         bool force_minimal_pipe_splitting = false;
4002         bool subvp_active = false;
4003         uint32_t i;
4004
4005         *is_plane_addition = false;
4006
4007         if (cur_stream_status &&
4008                         dc->current_state->stream_count > 0 &&
4009                         dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
4010                 /* determine if minimal transition is required due to MPC */
4011                 if (surface_count > 0) {
4012                         if (cur_stream_status->plane_count > surface_count) {
4013                                 force_minimal_pipe_splitting = true;
4014                         } else if (cur_stream_status->plane_count < surface_count) {
4015                                 force_minimal_pipe_splitting = true;
4016                                 *is_plane_addition = true;
4017                         }
4018                 }
4019         }
4020
4021         if (cur_stream_status &&
4022                         dc->current_state->stream_count == 1 &&
4023                         dc->debug.enable_single_display_2to1_odm_policy) {
4024                 /* determine if minimal transition is required due to dynamic ODM */
4025                 if (surface_count > 0) {
4026                         if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
4027                                 force_minimal_pipe_splitting = true;
4028                         } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
4029                                 force_minimal_pipe_splitting = true;
4030                                 *is_plane_addition = true;
4031                         }
4032                 }
4033         }
4034
4035         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4036                 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4037
4038                 if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
4039                         subvp_active = true;
4040                         break;
4041                 }
4042         }
4043
4044         /* For SubVP when adding or removing planes we need to add a minimal transition
4045          * (even when disabling all planes). Whenever disabling a phantom pipe, we
4046          * must use the minimal transition path to disable the pipe correctly.
4047          *
4048          * We want to use the minimal transition whenever SubVP is active, not only
4049          * when a plane is being added to or removed from a SubVP stream (an MPO
4050          * plane can be added to the DRR pipe of a SubVP + DRR config, in which case
4051          * we still want to run through a minimal transition to disable SubVP).
4052          */
4053         if (cur_stream_status && subvp_active) {
4054                 /* determine if minimal transition is required due to SubVP */
4055                 if (cur_stream_status->plane_count > surface_count) {
4056                         force_minimal_pipe_splitting = true;
4057                 } else if (cur_stream_status->plane_count < surface_count) {
4058                         force_minimal_pipe_splitting = true;
4059                         *is_plane_addition = true;
4060                 }
4061         }
4062
4063         return force_minimal_pipe_splitting;
4064 }
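
/*
 * Example of the expected calling pattern (an illustrative sketch only,
 * mirroring the use in dc_update_planes_and_stream() further below):
 *
 *	bool is_plane_addition = false;
 *
 *	if (could_mpcc_tree_change_for_active_pipes(dc, stream, srf_updates,
 *						    surface_count,
 *						    &is_plane_addition) &&
 *	    is_plane_addition) {
 *		// on plane addition, the minimal base is the current state
 *		if (!commit_minimal_transition_state_legacy(dc,
 *							    dc->current_state))
 *			return false;
 *	}
 */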
4065
4066 struct pipe_split_policy_backup {
4067         bool dynamic_odm_policy;
4068         bool subvp_policy;
4069         enum pipe_split_policy mpc_policy;
4070 };
4071
4072 static void release_minimal_transition_state(struct dc *dc,
4073                 struct dc_state *context, struct pipe_split_policy_backup *policy)
4074 {
4075         dc_state_release(context);
4076         /* restore previous pipe split and odm policy */
4077         if (!dc->config.is_vmin_only_asic)
4078                 dc->debug.pipe_split_policy = policy->mpc_policy;
4079         dc->debug.enable_single_display_2to1_odm_policy = policy->dynamic_odm_policy;
4080         dc->debug.force_disable_subvp = policy->subvp_policy;
4081 }
4082
4083 static struct dc_state *create_minimal_transition_state(struct dc *dc,
4084                 struct dc_state *base_context, struct pipe_split_policy_backup *policy)
4085 {
4086         struct dc_state *minimal_transition_context = NULL;
4087         unsigned int i, j;
4088
4089         if (!dc->config.is_vmin_only_asic) {
4090                 policy->mpc_policy = dc->debug.pipe_split_policy;
4091                 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
4092         }
4093         policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
4094         dc->debug.enable_single_display_2to1_odm_policy = false;
4095         policy->subvp_policy = dc->debug.force_disable_subvp;
4096         dc->debug.force_disable_subvp = true;
4097
4098         minimal_transition_context = dc_state_create_copy(base_context);
4099         if (!minimal_transition_context)
4100                 return NULL;
4101
4102         /* commit minimal state */
4103         if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
4104                 for (i = 0; i < minimal_transition_context->stream_count; i++) {
4105                         struct dc_stream_status *stream_status = &minimal_transition_context->stream_status[i];
4106
4107                         for (j = 0; j < stream_status->plane_count; j++) {
4108                                 struct dc_plane_state *plane_state = stream_status->plane_states[j];
4109
4110                                 /* force vsync flip when reconfiguring pipes to prevent underflow
4111                                  * and corruption
4112                                  */
4113                                 plane_state->flip_immediate = false;
4114                         }
4115                 }
4116         } else {
4117                 /* this should never happen */
4118                 release_minimal_transition_state(dc, minimal_transition_context, policy);
4119                 BREAK_TO_DEBUGGER();
4120                 minimal_transition_context = NULL;
4121         }
4122         return minimal_transition_context;
4123 }
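
/*
 * Illustrative create/release pairing (a sketch, under the assumption that
 * the same policy backup filled in by create is passed back to release, so
 * the debug pipe split / ODM / SubVP policies are always restored):
 *
 *	struct pipe_split_policy_backup policy;
 *	struct dc_state *mts;
 *
 *	mts = create_minimal_transition_state(dc, base_context, &policy);
 *	if (mts) {
 *		// ... commit or inspect the minimal state ...
 *		release_minimal_transition_state(dc, mts, &policy);
 *	}
 */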
4124
4125
4126 /**
4127  * commit_minimal_transition_state - Commit a minimal state based on current or new context
4128  *
4129  * @dc: DC structure, used to get the current state
4130  * @context: New context
4131  * @stream: Stream getting the update for the flip
4132  *
4133  * The function takes the current state and the new state and determines a minimal
4134  * transition state as the intermediate step that could make the transition between
4135  * them seamless. If one is found, it commits the minimal transition state, updates
4136  * the current state to it, and returns true; if not, it returns false.
4137  *
4138  * Return:
4139  * True if the minimal transition succeeded, false otherwise.
4140  */
4141 static bool commit_minimal_transition_state(struct dc *dc,
4142                 struct dc_state *context,
4143                 struct dc_stream_state *stream)
4144 {
4145         bool success = false;
4146         struct dc_state *minimal_transition_context;
4147         struct pipe_split_policy_backup policy;
4148
4149         /* commit based on new context */
4150         minimal_transition_context = create_minimal_transition_state(dc,
4151                         context, &policy);
4152         if (minimal_transition_context) {
4153                 if (dc->hwss.is_pipe_topology_transition_seamless(
4154                                         dc, dc->current_state, minimal_transition_context) &&
4155                         dc->hwss.is_pipe_topology_transition_seamless(
4156                                         dc, minimal_transition_context, context)) {
4157                         DC_LOG_DC("%s base = new state\n", __func__);
4158
4159                         success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
4160                 }
4161                 release_minimal_transition_state(dc, minimal_transition_context, &policy);
4162         }
4163
4164         if (!success) {
4165                 /* commit based on current context */
4166                 restore_planes_and_stream_state(&dc->current_state->scratch, stream);
4167                 minimal_transition_context = create_minimal_transition_state(dc,
4168                                 dc->current_state, &policy);
4169                 if (minimal_transition_context) {
4170                         if (dc->hwss.is_pipe_topology_transition_seamless(
4171                                         dc, dc->current_state, minimal_transition_context) &&
4172                                 dc->hwss.is_pipe_topology_transition_seamless(
4173                                                 dc, minimal_transition_context, context)) {
4174                                 DC_LOG_DC("%s base = current state\n", __func__);
4175                                 success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
4176                         }
4177                         release_minimal_transition_state(dc, minimal_transition_context, &policy);
4178                 }
4179                 restore_planes_and_stream_state(&context->scratch, stream);
4180         }
4181
4182         ASSERT(success);
4183         return success;
4184 }
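
/*
 * Decision flow of commit_minimal_transition_state() above, summarized as
 * an illustrative sketch:
 *
 *	1. build a minimal state from the NEW context; if both hops
 *	   (current -> minimal, minimal -> new) are seamless, commit it
 *	2. otherwise restore the stream/plane scratch state and retry with a
 *	   minimal state built from the CURRENT context
 *	3. if neither base yields a seamless pair of hops, ASSERT and
 *	   return false
 */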
4185
4186 /**
4187  * commit_minimal_transition_state_legacy - Create a transition pipe split state
4188  *
4189  * @dc: Used to get the current state status
4190  * @transition_base_context: New transition state
4191  *
4192  * In some specific configurations, such as pipe split on multi-display with
4193  * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe
4194  * programming when moving to new planes. To mitigate those types of problems,
4195  * this function adds a transition state that minimizes pipe usage before
4196  * programming the new configuration. When adding a new plane, the current
4197  * state requires the least pipes, so it is applied without splitting. When
4198  * removing a plane, the new state requires the least pipes, so it is applied
4199  * without splitting.
4200  *
4201  * Return:
4202  * False if something went wrong in the transition state, true otherwise.
4203  */
4204 static bool commit_minimal_transition_state_legacy(struct dc *dc,
4205                 struct dc_state *transition_base_context)
4206 {
4207         struct dc_state *transition_context;
4208         struct pipe_split_policy_backup policy;
4209         enum dc_status ret = DC_ERROR_UNEXPECTED;
4210         unsigned int i, j;
4211         unsigned int pipe_in_use = 0;
4212         bool subvp_in_use = false;
4213         bool odm_in_use = false;
4214
4215         /* check current pipes in use */
4216         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4217                 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
4218
4219                 if (pipe->plane_state)
4220                         pipe_in_use++;
4221         }
4222
4223         /* If SubVP is enabled and we are adding or removing planes from any main subvp
4224          * pipe, we must use the minimal transition.
4225          */
4226         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4227                 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4228
4229                 if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
4230                         subvp_in_use = true;
4231                         break;
4232                 }
4233         }
4234
4235         /* If ODM is enabled and we are adding or removing planes from any ODM
4236          * pipe, we must use the minimal transition.
4237          */
4238         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4239                 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
4240
4241                 if (resource_is_pipe_type(pipe, OTG_MASTER)) {
4242                         odm_in_use = resource_get_odm_slice_count(pipe) > 1;
4243                         break;
4244                 }
4245         }
4246
4247         /* When the OS adds a new surface while all pipes are already in use by the
4248          * ODM combine or MPC split features, we must use
4249          * commit_minimal_transition_state to transition safely. After the OS exits
4250          * MPO, ODM and MPC split go back to using all of the pipes, so we need to
4251          * call it again. Otherwise, return true to skip.
4252          *
4253          * This reduces the scenarios that use dc_commit_state_no_check at flip time,
4254          * especially entering/exiting MPO while DCN still has enough resources.
4255          */
4255         if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use)
4256                 return true;
4257
4258         DC_LOG_DC("%s base = %s state, reason = %s\n", __func__,
4259                         dc->current_state == transition_base_context ? "current" : "new",
4260                         subvp_in_use ? "Subvp In Use" :
4261                         odm_in_use ? "ODM in Use" :
4262                         dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use" :
4263                         "Unknown");
4264
4265         transition_context = create_minimal_transition_state(dc,
4266                         transition_base_context, &policy);
4267         if (transition_context) {
4268                 ret = dc_commit_state_no_check(dc, transition_context);
4269                 release_minimal_transition_state(dc, transition_context, &policy);
4270         }
4271
4272         if (ret != DC_OK) {
4273                 /* this should never happen */
4274                 BREAK_TO_DEBUGGER();
4275                 return false;
4276         }
4277
4278         /* force full surface update */
4279         for (i = 0; i < dc->current_state->stream_count; i++) {
4280                 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
4281                         dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
4282                 }
4283         }
4284
4285         return true;
4286 }
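
/*
 * Worked example (illustrative): on a 4-pipe ASIC driving one display with
 * ODM 2:1 and MPC split, all 4 pipes are in use, so pipe_in_use equals
 * pipe_count and adding an MPO plane must go through the minimal (no split,
 * no dynamic ODM) transition state before the new topology is programmed.
 * With spare pipes, and no SubVP or ODM in use, the function returns true
 * immediately and no intermediate commit happens.
 */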
4287
4288 /**
4289  * update_seamless_boot_flags() - Helper function for updating seamless boot flags
4290  *
4291  * @dc: Current DC state
4292  * @context: New DC state to be programmed
4293  * @surface_count: Number of surfaces that have an update
4294  * @stream: Corresponding stream to be updated in the current flip
4295  *
4296  * Updating seamless boot flags does not need to be part of the commit sequence. This
4297  * helper function will update the seamless boot flags on each flip (if required)
4298  * outside of the HW commit sequence (fast or slow).
4299  *
4300  * Return: void
4301  */
4302 static void update_seamless_boot_flags(struct dc *dc,
4303                 struct dc_state *context,
4304                 int surface_count,
4305                 struct dc_stream_state *stream)
4306 {
4307         if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
4308                 /* The seamless boot optimization flag keeps clocks and watermarks high
4309                  * until the first flip. After the first flip, optimization is required
4310                  * to lower bandwidth. Note that UEFI is expected to light up only a
4311                  * single display on POST, so we only expect one stream with the
4312                  * seamless boot flag set.
4313                  */
4314                 if (stream->apply_seamless_boot_optimization) {
4315                         stream->apply_seamless_boot_optimization = false;
4316
4317                         if (get_seamless_boot_stream_count(context) == 0)
4318                                 dc->optimized_required = true;
4319                 }
4320         }
4321 }
4322
4323 static void populate_fast_updates(struct dc_fast_update *fast_update,
4324                 struct dc_surface_update *srf_updates,
4325                 int surface_count,
4326                 struct dc_stream_update *stream_update)
4327 {
4328         int i = 0;
4329
4330         if (stream_update) {
4331                 fast_update[0].out_transfer_func = stream_update->out_transfer_func;
4332                 fast_update[0].output_csc_transform = stream_update->output_csc_transform;
4333         }
4334
4335         for (i = 0; i < surface_count; i++) {
4336                 fast_update[i].flip_addr = srf_updates[i].flip_addr;
4337                 fast_update[i].gamma = srf_updates[i].gamma;
4338                 fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
4339                 fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
4340                 fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
4341         }
4342 }
4343
4344 static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
4345 {
4346         int i;
4347
4348         if (fast_update[0].out_transfer_func ||
4349                 fast_update[0].output_csc_transform)
4350                 return true;
4351
4352         for (i = 0; i < surface_count; i++) {
4353                 if (fast_update[i].flip_addr ||
4354                                 fast_update[i].gamma ||
4355                                 fast_update[i].gamut_remap_matrix ||
4356                                 fast_update[i].input_csc_color_matrix ||
4357                                 fast_update[i].coeff_reduction_factor)
4358                         return true;
4359         }
4360
4361         return false;
4362 }
4363
4364 static bool full_update_required(struct dc *dc,
4365                 struct dc_surface_update *srf_updates,
4366                 int surface_count,
4367                 struct dc_stream_update *stream_update,
4368                 struct dc_stream_state *stream)
4369 {
4371         int i;
4372         struct dc_stream_status *stream_status;
4373         const struct dc_state *context = dc->current_state;
4374
4375         for (i = 0; i < surface_count; i++) {
4376                 if (srf_updates &&
4377                                 (srf_updates[i].plane_info ||
4378                                 srf_updates[i].scaling_info ||
4379                                 (srf_updates[i].hdr_mult.value &&
4380                                 srf_updates[i].hdr_mult.value != srf_updates[i].surface->hdr_mult.value) ||
4381                                 srf_updates[i].in_transfer_func ||
4382                                 srf_updates[i].func_shaper ||
4383                                 srf_updates[i].lut3d_func ||
4384                                 srf_updates[i].surface->force_full_update ||
4385                                 (srf_updates[i].flip_addr &&
4386                                 srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
4387                                 !is_surface_in_context(context, srf_updates[i].surface)))
4388                         return true;
4389         }
4390
4391         if (stream_update &&
4392                         (((stream_update->src.height != 0 && stream_update->src.width != 0) ||
4393                         (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
4394                         stream_update->integer_scaling_update) ||
4395                         stream_update->hdr_static_metadata ||
4396                         stream_update->abm_level ||
4397                         stream_update->periodic_interrupt ||
4398                         stream_update->vrr_infopacket ||
4399                         stream_update->vsc_infopacket ||
4400                         stream_update->vsp_infopacket ||
4401                         stream_update->hfvsif_infopacket ||
4402                         stream_update->vtem_infopacket ||
4403                         stream_update->adaptive_sync_infopacket ||
4404                         stream_update->dpms_off ||
4405                         stream_update->allow_freesync ||
4406                         stream_update->vrr_active_variable ||
4407                         stream_update->vrr_active_fixed ||
4408                         stream_update->gamut_remap ||
4409                         stream_update->output_color_space ||
4410                         stream_update->dither_option ||
4411                         stream_update->wb_update ||
4412                         stream_update->dsc_config ||
4413                         stream_update->mst_bw_update ||
4414                         stream_update->func_shaper ||
4415                         stream_update->lut3d_func ||
4416                         stream_update->pending_test_pattern ||
4417                         stream_update->crtc_timing_adjust))
4418                 return true;
4419
4420         if (stream) {
4421                 stream_status = dc_stream_get_status(stream);
4422                 if (stream_status == NULL || stream_status->plane_count != surface_count)
4423                         return true;
4424         }
4425         if (dc->idle_optimizations_allowed)
4426                 return true;
4427
4428         return false;
4429 }
4430
4431 static bool fast_update_only(struct dc *dc,
4432                 struct dc_fast_update *fast_update,
4433                 struct dc_surface_update *srf_updates,
4434                 int surface_count,
4435                 struct dc_stream_update *stream_update,
4436                 struct dc_stream_state *stream)
4437 {
4438         return fast_updates_exist(fast_update, surface_count)
4439                         && !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
4440 }
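
/*
 * Illustrative classification sketch (an assumed caller pattern, matching
 * the use in dc_update_planes_and_stream() below):
 *
 *	struct dc_fast_update fast_update[MAX_SURFACES] = {0};
 *
 *	populate_fast_updates(fast_update, srf_updates, surface_count,
 *			      stream_update);
 *	if (fast_update_only(dc, fast_update, srf_updates, surface_count,
 *			     stream_update, stream)) {
 *		// flip/gamma/CSC only: eligible for the fast commit path
 *	} else {
 *		// plane, scaling or stream level change: full commit path
 *	}
 */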
4441
4442 bool dc_update_planes_and_stream(struct dc *dc,
4443                 struct dc_surface_update *srf_updates, int surface_count,
4444                 struct dc_stream_state *stream,
4445                 struct dc_stream_update *stream_update)
4446 {
4447         struct dc_state *context;
4448         enum surface_update_type update_type;
4449         int i;
4450         struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4451
4452         /* In cases where MPO and split or ODM are used transitions can
4453          * cause underflow. Apply stream configuration with minimal pipe
4454          * split first to avoid unsupported transitions for active pipes.
4455          */
4456         bool force_minimal_pipe_splitting = false;
4457         bool is_plane_addition = false;
4458         bool is_fast_update_only;
4459
4460         dc_exit_ips_for_hw_access(dc);
4461
4462         populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
4463         is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
4464                         surface_count, stream_update, stream);
4465         force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
4466                         dc,
4467                         stream,
4468                         srf_updates,
4469                         surface_count,
4470                         &is_plane_addition);
4471
4472         /* on plane addition, minimal state is the current one */
4473         if (force_minimal_pipe_splitting && is_plane_addition &&
4474                 !commit_minimal_transition_state_legacy(dc, dc->current_state))
4475                 return false;
4476
4477         if (!update_planes_and_stream_state(
4478                         dc,
4479                         srf_updates,
4480                         surface_count,
4481                         stream,
4482                         stream_update,
4483                         &update_type,
4484                         &context))
4485                 return false;
4486
4487         /* on plane removal, minimal state is the new one */
4488         if (force_minimal_pipe_splitting && !is_plane_addition) {
4489                 if (!commit_minimal_transition_state_legacy(dc, context)) {
4490                         dc_state_release(context);
4491                         return false;
4492                 }
4493                 update_type = UPDATE_TYPE_FULL;
4494         }
4495
4496         if (dc->hwss.is_pipe_topology_transition_seamless &&
4497                         !dc->hwss.is_pipe_topology_transition_seamless(
4498                                         dc, dc->current_state, context)) {
4499                 commit_minimal_transition_state(dc,
4500                                 context, stream);
4501         }
4502         update_seamless_boot_flags(dc, context, surface_count, stream);
4503         if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
4504                 commit_planes_for_stream_fast(dc,
4505                                 srf_updates,
4506                                 surface_count,
4507                                 stream,
4508                                 stream_update,
4509                                 update_type,
4510                                 context);
4511         } else {
4512                 if (!stream_update &&
4513                                 dc->hwss.is_pipe_topology_transition_seamless &&
4514                                 !dc->hwss.is_pipe_topology_transition_seamless(
4515                                                 dc, dc->current_state, context)) {
4516                         DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
4517                         BREAK_TO_DEBUGGER();
4518                 }
4519                 commit_planes_for_stream(
4520                                 dc,
4521                                 srf_updates,
4522                                 surface_count,
4523                                 stream,
4524                                 stream_update,
4525                                 update_type,
4526                                 context);
4527         }
4528
4529         if (dc->current_state != context) {
4530
4531                 /* Since memory free requires elevated IRQL, an interrupt
4532                  * request is generated by mem free. If this happens
4533                  * between freeing and reassigning the context, our vsync
4534                  * interrupt will call into dc and cause a memory
4535                  * corruption BSOD. Hence, we first reassign the context,
4536                  * then free the old context.
4537                  */
4538
4539                 struct dc_state *old = dc->current_state;
4540
4541                 dc->current_state = context;
4542                 dc_state_release(old);
4543
4544                 // clear any forced full updates
4545                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4546                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
4547
4548                         if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4549                                 pipe_ctx->plane_state->force_full_update = false;
4550                 }
4551         }
4552         return true;
4553 }
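
/*
 * Example call from a DM for a page flip (an illustrative sketch; the
 * flip_addr wiring is hypothetical and not taken from any real DM):
 *
 *	struct dc_surface_update srf_updates[1] = {0};
 *
 *	srf_updates[0].surface = plane_state;
 *	srf_updates[0].flip_addr = &flip_addr;	// address-only update
 *
 *	if (!dc_update_planes_and_stream(dc, srf_updates, 1, stream, NULL))
 *		// topology transition failed; the DM must handle the error
 */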
4554
4555 void dc_commit_updates_for_stream(struct dc *dc,
4556                 struct dc_surface_update *srf_updates,
4557                 int surface_count,
4558                 struct dc_stream_state *stream,
4559                 struct dc_stream_update *stream_update,
4560                 struct dc_state *state)
4561 {
4562         const struct dc_stream_status *stream_status;
4563         enum surface_update_type update_type;
4564         struct dc_state *context;
4565         struct dc_context *dc_ctx = dc->ctx;
4566         int i, j;
4567         struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4568
4569         dc_exit_ips_for_hw_access(dc);
4570
4571         populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
4572         stream_status = dc_stream_get_status(stream);
4573         context = dc->current_state;
4574
4575         update_type = dc_check_update_surfaces_for_stream(
4576                                 dc, srf_updates, surface_count, stream_update, stream_status);
4577
4578         /* TODO: Since change commit sequence can have a huge impact,
4579          * we decided to only enable it for DCN3x. However, as soon as
4580          * we get more confident about this change we'll need to enable
4581          * the new sequence for all ASICs.
4582          */
4583         if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
4584                 /*
4585                  * Previous frame finished and HW is ready for optimization.
4586                  */
4587                 if (update_type == UPDATE_TYPE_FAST)
4588                         dc_post_update_surfaces_to_stream(dc);
4589
4590                 dc_update_planes_and_stream(dc, srf_updates,
4591                                             surface_count, stream,
4592                                             stream_update);
4593                 return;
4594         }
4595
4596         if (update_type >= update_surface_trace_level)
4597                 update_surface_trace(dc, srf_updates, surface_count);
4598
4599
4600         if (update_type >= UPDATE_TYPE_FULL) {
4601
4602                 /* initialize scratch memory for building context */
4603                 context = dc_state_create_copy(state);
4604                 if (context == NULL) {
4605                         DC_ERROR("Failed to allocate new validate context!\n");
4606                         return;
4607                 }
4608
4609                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4610                         struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
4611                         struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4612
4613                         if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
4614                                 new_pipe->plane_state->force_full_update = true;
4615                 }
4616         } else if (update_type == UPDATE_TYPE_FAST) {
4617                 /*
4618                  * Previous frame finished and HW is ready for optimization.
4619                  */
4620                 dc_post_update_surfaces_to_stream(dc);
4621         }
4622
4623
4624         for (i = 0; i < surface_count; i++) {
4625                 struct dc_plane_state *surface = srf_updates[i].surface;
4626
4627                 copy_surface_update_to_plane(surface, &srf_updates[i]);
4628
4629                 if (update_type >= UPDATE_TYPE_MED) {
4630                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
4631                                 struct pipe_ctx *pipe_ctx =
4632                                         &context->res_ctx.pipe_ctx[j];
4633
4634                                 if (pipe_ctx->plane_state != surface)
4635                                         continue;
4636
4637                                 resource_build_scaling_params(pipe_ctx);
4638                         }
4639                 }
4640         }
4641
4642         copy_stream_update_to_stream(dc, context, stream, stream_update);
4643
4644         if (update_type >= UPDATE_TYPE_FULL) {
4645                 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
4646                         DC_ERROR("Mode validation failed for stream update!\n");
4647                         dc_state_release(context);
4648                         return;
4649                 }
4650         }
4651
4652         TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
4653
4654         update_seamless_boot_flags(dc, context, surface_count, stream);
4655         if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
4656                         !dc->debug.enable_legacy_fast_update) {
4657                 commit_planes_for_stream_fast(dc,
4658                                 srf_updates,
4659                                 surface_count,
4660                                 stream,
4661                                 stream_update,
4662                                 update_type,
4663                                 context);
4664         } else {
4665                 commit_planes_for_stream(
4666                                 dc,
4667                                 srf_updates,
4668                                 surface_count,
4669                                 stream,
4670                                 stream_update,
4671                                 update_type,
4672                                 context);
4673         }
4674         /* update current_state */
4675         if (dc->current_state != context) {
4676
4677                 struct dc_state *old = dc->current_state;
4678
4679                 dc->current_state = context;
4680                 dc_state_release(old);
4681
4682                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4683                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
4684
4685                         if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4686                                 pipe_ctx->plane_state->force_full_update = false;
4687                 }
4688         }
4689
4690         /* Legacy optimization path for DCE. */
4691         if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
4692                 dc_post_update_surfaces_to_stream(dc);
4693                 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
4694         }
4695
4699
4700 uint8_t dc_get_current_stream_count(struct dc *dc)
4701 {
4702         return dc->current_state->stream_count;
4703 }
4704
4705 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
4706 {
4707         if (i < dc->current_state->stream_count)
4708                 return dc->current_state->streams[i];
4709         return NULL;
4710 }
4711
4712 enum dc_irq_source dc_interrupt_to_irq_source(
4713                 struct dc *dc,
4714                 uint32_t src_id,
4715                 uint32_t ext_id)
4716 {
4717         return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
4718 }
4719
4720 /*
4721  * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
4722  */
4723 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
4724 {
4725
4726         if (dc == NULL)
4727                 return false;
4728
4729         return dal_irq_service_set(dc->res_pool->irqs, src, enable);
4730 }
4731
4732 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
4733 {
4734         dal_irq_service_ack(dc->res_pool->irqs, src);
4735 }
4736
4737 void dc_power_down_on_boot(struct dc *dc)
4738 {
4739         if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
4740                         dc->hwss.power_down_on_boot)
4741                 dc->hwss.power_down_on_boot(dc);
4742 }
4743
4744 void dc_set_power_state(
4745         struct dc *dc,
4746         enum dc_acpi_cm_power_state power_state)
4747 {
4748         if (!dc->current_state)
4749                 return;
4750
4751         switch (power_state) {
4752         case DC_ACPI_CM_POWER_STATE_D0:
4753                 dc_state_construct(dc, dc->current_state);
4754
4755                 dc_exit_ips_for_hw_access(dc);
4756
4757                 dc_z10_restore(dc);
4758
4759                 dc->hwss.init_hw(dc);
4760
4761                 if (dc->hwss.init_sys_ctx != NULL &&
4762                         dc->vm_pa_config.valid) {
4763                         dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
4764                 }
4765
4766                 break;
4767         default:
4768                 ASSERT(dc->current_state->stream_count == 0);
4769
4770                 dc_state_destruct(dc->current_state);
4771
4772                 break;
4773         }
4774 }
4775
4776 void dc_resume(struct dc *dc)
4777 {
4778         uint32_t i;
4779
4780         for (i = 0; i < dc->link_count; i++)
4781                 dc->link_srv->resume(dc->links[i]);
4782 }
4783
4784 bool dc_is_dmcu_initialized(struct dc *dc)
4785 {
4786         struct dmcu *dmcu = dc->res_pool->dmcu;
4787
4788         if (dmcu)
4789                 return dmcu->funcs->is_dmcu_initialized(dmcu);
4790         return false;
4791 }
4792
4793 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
4794 {
4795         info->displayClock                              = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
4796         info->engineClock                               = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
4797         info->memoryClock                               = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
4798         info->maxSupportedDppClock              = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
4799         info->dppClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
4800         info->socClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
4801         info->dcfClockDeepSleep                 = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
4802         info->fClock                                    = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
4803         info->phyClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
4804 }
4805 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
4806 {
4807         if (dc->hwss.set_clock)
4808                 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
4809         return DC_ERROR_UNEXPECTED;
4810 }
4811 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
4812 {
4813         if (dc->hwss.get_clock)
4814                 dc->hwss.get_clock(dc, clock_type, clock_cfg);
4815 }
4816
4817 /* enable/disable eDP PSR without specifying a stream for eDP */
4818 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
4819 {
4820         int i;
4821         bool allow_active;
4822
4823         for (i = 0; i < dc->current_state->stream_count ; i++) {
4824                 struct dc_link *link;
4825                 struct dc_stream_state *stream = dc->current_state->streams[i];
4826
4827                 link = stream->link;
4828                 if (!link)
4829                         continue;
4830
4831                 if (link->psr_settings.psr_feature_enabled) {
4832                         if (enable && !link->psr_settings.psr_allow_active) {
4833                                 allow_active = true;
4834                                 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
4835                                         return false;
4836                         } else if (!enable && link->psr_settings.psr_allow_active) {
4837                                 allow_active = false;
4838                                 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
4839                                         return false;
4840                         }
4841                 }
4842         }
4843
4844         return true;
4845 }
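
/*
 * Example (illustrative): a DM can park PSR across a sequence that must
 * keep the eDP link active, then re-allow it:
 *
 *	dc_set_psr_allow_active(dc, false);	// exit PSR on all links
 *	// ... link-sensitive work ...
 *	dc_set_psr_allow_active(dc, true);	// allow PSR again
 */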
4846
4847 /* enable/disable eDP Replay without specifying a stream for eDP */
4848 bool dc_set_replay_allow_active(struct dc *dc, bool active)
4849 {
4850         int i;
4851         bool allow_active;
4852
4853         for (i = 0; i < dc->current_state->stream_count; i++) {
4854                 struct dc_link *link;
4855                 struct dc_stream_state *stream = dc->current_state->streams[i];
4856
4857                 link = stream->link;
4858                 if (!link)
4859                         continue;
4860
4861                 if (link->replay_settings.replay_feature_enabled) {
4862                         if (active && !link->replay_settings.replay_allow_active) {
4863                                 allow_active = true;
4864                                 if (!dc_link_set_replay_allow_active(link, &allow_active,
4865                                         false, false, NULL))
4866                                         return false;
4867                         } else if (!active && link->replay_settings.replay_allow_active) {
4868                                 allow_active = false;
4869                                 if (!dc_link_set_replay_allow_active(link, &allow_active,
4870                                         true, false, NULL))
4871                                         return false;
4872                         }
4873                 }
4874         }
4875
4876         return true;
4877 }
4878
4879 void dc_allow_idle_optimizations(struct dc *dc, bool allow)
4880 {
4881         if (dc->debug.disable_idle_power_optimizations)
4882                 return;
4883
4884         if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
4885                 return;
4886
4887         if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
4888                 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
4889                         return;
4890
4891         if (allow == dc->idle_optimizations_allowed)
4892                 return;
4893
4894         if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
4895                 dc->idle_optimizations_allowed = allow;
4896 }
4897
4898 void dc_exit_ips_for_hw_access(struct dc *dc)
4899 {
4900         if (dc->caps.ips_support)
4901                 dc_allow_idle_optimizations(dc, false);
4902 }
4903
4904 bool dc_dmub_is_ips_idle_state(struct dc *dc)
4905 {
4906         if (dc->debug.disable_idle_power_optimizations)
4907                 return false;
4908
4909         if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
4910                 return false;
4911
4912         if (!dc->ctx->dmub_srv)
4913                 return false;
4914
4915         return dc->ctx->dmub_srv->idle_allowed;
4916 }
4917
4918 /* set min and max memory clock to lowest and highest DPM level, respectively */
4919 void dc_unlock_memory_clock_frequency(struct dc *dc)
4920 {
4921         if (dc->clk_mgr->funcs->set_hard_min_memclk)
4922                 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
4923
4924         if (dc->clk_mgr->funcs->set_hard_max_memclk)
4925                 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4926 }
4927
4928 /* set min memory clock to the min required for current mode, max to maxDPM */
4929 void dc_lock_memory_clock_frequency(struct dc *dc)
4930 {
4931         if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
4932                 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
4933
4934         if (dc->clk_mgr->funcs->set_hard_min_memclk)
4935                 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
4936
4937         if (dc->clk_mgr->funcs->set_hard_max_memclk)
4938                 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4939 }
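
/*
 * Illustrative pairing of the two helpers above (an assumed DM usage
 * pattern, not taken from a real caller):
 *
 *	dc_unlock_memory_clock_frequency(dc);	// full DPM range available
 *	// ... mode programming that may need a higher memclk ...
 *	dc_lock_memory_clock_frequency(dc);	// pin min memclk for the mode
 */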
4940
4941 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
4942 {
4943         struct dc_state *context = dc->current_state;
4944         struct hubp *hubp;
4945         struct pipe_ctx *pipe;
4946         int i;
4947
4948         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4949                 pipe = &context->res_ctx.pipe_ctx[i];
4950
4951                 if (pipe->stream != NULL) {
4952                         dc->hwss.disable_pixel_data(dc, pipe, true);
4953
4954                         // wait for double buffer
4955                         pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4956                         pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
4957                         pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4958
4959                         hubp = pipe->plane_res.hubp;
4960                         hubp->funcs->set_blank_regs(hubp, true);
4961                 }
4962         }
4963
4964         dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
4965         dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
4966
4967         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4968                 pipe = &context->res_ctx.pipe_ctx[i];
4969
4970                 if (pipe->stream != NULL) {
4971                         dc->hwss.disable_pixel_data(dc, pipe, false);
4972
4973                         hubp = pipe->plane_res.hubp;
4974                         hubp->funcs->set_blank_regs(hubp, false);
4975                 }
4976         }
4977 }
4978
4979
4980 /**
4981  * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
4982  * @dc: pointer to dc of the dm calling this
4983  * @enable: True = transition to DC mode, false = transition back to AC mode
4984  *
4985  * Some SoCs define additional clock limits when in DC mode; the DM should
4986  * invoke this function when the platform undergoes a power source transition
4987  * so DC can apply or remove the limit. This interface may be disruptive to
4988  * the onscreen content.
4989  *
4990  * Context: Triggered by the OS through the DM interface, or manually by
4991  * escape calls. The caller needs to hold the dc lock when doing so.
4992  *
4993  * Return: none (void function)
4994  *
4995  */
4996 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
4997 {
4998         unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i;
4999         bool p_state_change_support;
5000
5001         if (!dc->config.dc_mode_clk_limit_support)
5002                 return;
5003
5004         softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
5005         for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) {
5006                 if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM)
5007                         maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
5008         }
5009         funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
5010         p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
5011
5012         if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
5013                 if (p_state_change_support) {
5014                         if (funcMin <= softMax)
5015                                 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
5016                         // else: No-Op
5017                 } else {
5018                         if (funcMin <= softMax)
5019                                 blank_and_force_memclk(dc, true, softMax);
5020                         // else: No-Op
5021                 }
5022         } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
5023                 if (p_state_change_support) {
5024                         if (funcMin <= softMax)
5025                                 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
5026                         // else: No-Op
5027                 } else {
5028                         if (funcMin <= softMax)
5029                                 blank_and_force_memclk(dc, true, maxDPM);
5030                         // else: No-Op
5031                 }
5032         }
5033         dc->clk_mgr->dc_mode_softmax_enabled = enable;
5034 }
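
/*
 * Example (illustrative sketch, not compiled): a DM reacting to an AC/DC
 * power source transition could forward the event roughly as follows; the
 * handler name and dm_dc_lock are hypothetical, but the locking requirement
 * comes from the Context note above:
 *
 *	void dm_on_power_source_change(struct dc *dc, bool on_battery)
 *	{
 *		mutex_lock(&dm_dc_lock);
 *		dc_enable_dcmode_clk_limit(dc, on_battery);
 *		mutex_unlock(&dm_dc_lock);
 *	}
 */
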
5035 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
5036                 struct dc_cursor_attributes *cursor_attr)
5037 {
5038         if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
5039                 return true;
5040         return false;
5041 }
5042
5043 /* cleanup on driver unload */
5044 void dc_hardware_release(struct dc *dc)
5045 {
5046         dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);
5047
5048         if (dc->hwss.hardware_release)
5049                 dc->hwss.hardware_release(dc);
5050 }
5051
5052 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
5053 {
5054         if (dc->current_state)
5055                 dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
5056 }
5057
5058 /**
5059  * dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notifications
5060  *
5061  * @dc: [in] dc structure
5062  *
5063  * Checks whether DMUB FW supports outbox notifications. If supported, DM
5064  * should register the outbox interrupt prior to actually enabling interrupts
5065  * via dc_enable_dmub_outbox.
5066  *
5067  * Return:
5068  * True if DMUB FW supports outbox notifications, False otherwise
5069  */
5070 bool dc_is_dmub_outbox_supported(struct dc *dc)
5071 {
5072         switch (dc->ctx->asic_id.chip_family) {
5073
5074         case FAMILY_YELLOW_CARP:
5075                 /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
5076                 if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
5077                     !dc->debug.dpia_debug.bits.disable_dpia)
5078                         return true;
5079                 break;
5080
5081         case AMDGPU_FAMILY_GC_11_0_1:
5082         case AMDGPU_FAMILY_GC_11_5_0:
5083                 if (!dc->debug.dpia_debug.bits.disable_dpia)
5084                         return true;
5085                 break;
5086
5087         default:
5088                 break;
5089         }
5090
5091         /* dmub aux needs dmub notifications to be enabled */
5092         return dc->debug.enable_dmub_aux_for_legacy_ddc;
5093 }
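
/*
 * Example (illustrative sketch): the intended DM bring-up order is to query
 * support first, register the outbox interrupt, and only then enable
 * notifications; register_dmub_outbox_irq() stands in for the DM's own
 * interrupt registration code:
 *
 *	if (dc_is_dmub_outbox_supported(dc)) {
 *		register_dmub_outbox_irq();	// DM-specific, hypothetical
 *		dc_enable_dmub_outbox(dc);
 *	}
 */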
5095
5096 /**
5097  * dc_enable_dmub_notifications - Check if dmub fw supports outbox notifications
5098  *
5099  * @dc: [in] dc structure
5100  *
5101  * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
5102  * notifications. All DMs shall switch to dc_is_dmub_outbox_supported.  This
5103  * API shall be removed after switching.
5104  *
5105  * Return:
5106  * True if DMUB FW supports outbox notifications, False otherwise
5107  */
5108 bool dc_enable_dmub_notifications(struct dc *dc)
5109 {
5110         return dc_is_dmub_outbox_supported(dc);
5111 }
5112
5113 /**
5114  * dc_enable_dmub_outbox - Enables DMUB unsolicited notification
5115  *
5116  * @dc: [in] dc structure
5117  *
5118  * Enables DMUB unsolicited notifications to x86 via outbox.
5119  */
5120 void dc_enable_dmub_outbox(struct dc *dc)
5121 {
5122         struct dc_context *dc_ctx = dc->ctx;
5123
5124         dmub_enable_outbox_notification(dc_ctx->dmub_srv);
5125         DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
5126 }
5127
5128 /**
5129  * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
5130  *                                      Sets port index appropriately for legacy DDC
5131  * @dc: dc structure
5132  * @link_index: link index
5133  * @payload: aux payload
5134  *
5135  * Returns: True once the command is queued to DMUB; the reply arrives via outbox
5136  */
5137 bool dc_process_dmub_aux_transfer_async(struct dc *dc,
5138                                 uint32_t link_index,
5139                                 struct aux_payload *payload)
5140 {
5141         uint8_t action;
5142         union dmub_rb_cmd cmd = {0};
5143
5144         ASSERT(payload->length <= 16);
5145
5146         cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
5147         cmd.dp_aux_access.header.payload_bytes = 0;
5148         /* For dpia, ddc_pin is set to NULL */
5149         if (!dc->links[link_index]->ddc->ddc_pin)
5150                 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
5151         else
5152                 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
5153
5154         cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
5155         cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
5156         cmd.dp_aux_access.aux_control.timeout = 0;
5157         cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
5158         cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
5159         cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
5160
5161         /* set aux action */
5162         if (payload->i2c_over_aux) {
5163                 if (payload->write) {
5164                         if (payload->mot)
5165                                 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
5166                         else
5167                                 action = DP_AUX_REQ_ACTION_I2C_WRITE;
5168                 } else {
5169                         if (payload->mot)
5170                                 action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
5171                         else
5172                                 action = DP_AUX_REQ_ACTION_I2C_READ;
5173                 }
5174         } else {
5175                 if (payload->write)
5176                         action = DP_AUX_REQ_ACTION_DPCD_WRITE;
5177                 else
5178                         action = DP_AUX_REQ_ACTION_DPCD_READ;
5179         }
5180
5181         cmd.dp_aux_access.aux_control.dpaux.action = action;
5182
5183         if (payload->length && payload->write) {
5184                 memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
5185                         payload->data,
5186                         payload->length
5187                         );
5188         }
5189
5190         dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
5191
5192         return true;
5193 }
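
/*
 * Example (illustrative sketch): queueing a one byte native DPCD read; the
 * field names follow struct aux_payload as used above, and the read data
 * arrives later in an outbox AUX reply notification:
 *
 *	struct aux_payload payload = {0};
 *	uint8_t data;
 *
 *	payload.address = DP_DPCD_REV;	// DPCD offset 0x0000
 *	payload.length = 1;
 *	payload.data = &data;
 *	payload.write = false;
 *	payload.i2c_over_aux = false;	// native AUX, not I2C-over-AUX
 *	dc_process_dmub_aux_transfer_async(dc, link_index, &payload);
 */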
5194
5195 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
5196                                             uint8_t dpia_port_index)
5197 {
5198         uint8_t index, link_index = 0xFF;
5199
5200         for (index = 0; index < dc->link_count; index++) {
5201                 /* ddc_hw_inst has dpia port index for dpia links
5202                  * and ddc instance for legacy links
5203                  */
5204                 if (!dc->links[index]->ddc->ddc_pin) {
5205                         if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
5206                                 link_index = index;
5207                                 break;
5208                         }
5209                 }
5210         }
5211         ASSERT(link_index != 0xFF);
5212         return link_index;
5213 }
5214
5215 /**
5216  * dc_process_dmub_set_config_async - Submits set_config command
5217  *
5218  * @dc: [in] dc structure
5219  * @link_index: [in] link index
5220  * @payload: [in] aux payload
5221  * @notify: [out] set_config immediate reply
5222  *
5223  * Submits set_config command to dmub via inbox message.
5224  *
5225  * Return:
5226  * True if successful, False if failure
5227  */
5228 bool dc_process_dmub_set_config_async(struct dc *dc,
5229                                 uint32_t link_index,
5230                                 struct set_config_cmd_payload *payload,
5231                                 struct dmub_notification *notify)
5232 {
5233         union dmub_rb_cmd cmd = {0};
5234         bool is_cmd_complete = true;
5235
5236         /* prepare SET_CONFIG command */
5237         cmd.set_config_access.header.type = DMUB_CMD__DPIA;
5238         cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
5239
5240         cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
5241         cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
5242         cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
5243
5244         if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
5245                 /* command is not processed by dmub */
5246                 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
5247                 return is_cmd_complete;
5248         }
5249
5250         /* command processed by dmub, if ret_status is 1, it is completed instantly */
5251         if (cmd.set_config_access.header.ret_status == 1)
5252                 notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
5253         else
5254                 /* cmd pending, will receive notification via outbox */
5255                 is_cmd_complete = false;
5256
5257         return is_cmd_complete;
5258 }
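
/*
 * Example (illustrative sketch): handling both completion paths of the call
 * above; when false is returned, the final status arrives later through an
 * outbox SET_CONFIG reply notification:
 *
 *	struct dmub_notification notify = {0};
 *
 *	if (dc_process_dmub_set_config_async(dc, link_index, &payload, &notify)) {
 *		// completed (or rejected) immediately; notify.sc_status is valid
 *	} else {
 *		// pending; wait for the outbox notification instead
 *	}
 */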
5259
5260 /**
5261  * dc_process_dmub_set_mst_slots - Submits MST slot allocation
5262  *
5263  * @dc: [in] dc structure
5264  * @link_index: [in] link index
5265  * @mst_alloc_slots: [in] mst slots to be allotted
5266  * @mst_slots_in_use: [out] mst slots in use returned in failure case
5267  *
5268  * Submits mst slot allocation command to dmub via inbox message
5269  *
5270  * Return:
5271  * DC_OK if successful, an error code otherwise
5272  */
5273 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
5274                                 uint32_t link_index,
5275                                 uint8_t mst_alloc_slots,
5276                                 uint8_t *mst_slots_in_use)
5277 {
5278         union dmub_rb_cmd cmd = {0};
5279
5280         /* prepare MST_ALLOC_SLOTS command */
5281         cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
5282         cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
5283
5284         cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
5285         cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
5286
5287         if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
5288                 /* command is not processed by dmub */
5289                 return DC_ERROR_UNEXPECTED;
5290
5291         /* command was processed by dmub; anything other than ret_status == 1 is an error */
5292         if (cmd.set_mst_alloc_slots.header.ret_status != 1)
5293                 /* command processing error */
5294                 return DC_ERROR_UNEXPECTED;
5295
5296         /* command processed and we have a status of 2, mst not enabled in dpia */
5297         if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
5298                 return DC_FAIL_UNSUPPORTED_1;
5299
5300         /* previously configured mst alloc and used slots did not match */
5301         if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
5302                 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
5303                 return DC_NOT_SUPPORTED;
5304         }
5305
5306         return DC_OK;
5307 }
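
/*
 * Example (illustrative sketch): interpreting the distinct status codes
 * documented above; the slot count of 10 is arbitrary:
 *
 *	uint8_t slots_in_use;
 *	enum dc_status status;
 *
 *	status = dc_process_dmub_set_mst_slots(dc, link_index, 10, &slots_in_use);
 *	if (status == DC_FAIL_UNSUPPORTED_1) {
 *		// MST is not enabled on this DPIA
 *	} else if (status == DC_NOT_SUPPORTED) {
 *		// allocation mismatch; slots_in_use holds the current count
 *	}
 */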
5308
5309 /**
5310  * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable
5311  *
5312  * @dc: [in] dc structure
5313  * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
5314  *
5315  * Submits dpia hpd int enable command to dmub via inbox message
5316  */
5317 void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
5318                                 uint32_t hpd_int_enable)
5319 {
5320         union dmub_rb_cmd cmd = {0};
5321
5322         cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
5323         cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
5324
5325         dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
5326
5327         DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
5328 }
5329
5330 /**
5331  * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging
5332  *
5333  * @dc: [in] dc structure
5334  */
5337 void dc_print_dmub_diagnostic_data(const struct dc *dc)
5338 {
5339         dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv);
5340 }
5341
5342 /**
5343  * dc_disable_accelerated_mode - disable accelerated mode
5344  * @dc: dc structure
5345  */
5346 void dc_disable_accelerated_mode(struct dc *dc)
5347 {
5348         bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
5349 }
5350
5352 /**
5353  *  dc_notify_vsync_int_state - notifies vsync enable/disable state
5354  *  @dc: dc structure
5355  *  @stream: stream where vsync int state changed
5356  *  @enable: whether vsync is enabled or disabled
5357  *
5358  *  Called when vsync is enabled/disabled. Will notify DMUB to start/stop
5359  *  ABM interrupts after steady state is reached.
5360  */
5361 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
5362 {
5363         int i;
5364         int edp_num;
5365         struct pipe_ctx *pipe = NULL;
5366         struct dc_link *link = stream->sink->link;
5367         struct dc_link *edp_links[MAX_NUM_EDP];
5368
5370         if (link->psr_settings.psr_feature_enabled)
5371                 return;
5372
5373         if (link->replay_settings.replay_feature_enabled)
5374                 return;
5375
5376         /*find primary pipe associated with stream*/
5377         for (i = 0; i < MAX_PIPES; i++) {
5378                 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
5379
5380                 if (pipe->stream == stream && pipe->stream_res.tg)
5381                         break;
5382         }
5383
5384         if (i == MAX_PIPES) {
5385                 ASSERT(0);
5386                 return;
5387         }
5388
5389         dc_get_edp_links(dc, edp_links, &edp_num);
5390
5391         /* Determine panel inst */
5392         for (i = 0; i < edp_num; i++) {
5393                 if (edp_links[i] == link)
5394                         break;
5395         }
5396
5397         if (i == edp_num)
5398                 return;
5400
5401         if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
5402                 pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
5403 }
5404
5405 /**
5406  * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause
5407  *                         ABM
5408  * @dc: dc structure
5409  * @stream: stream whose ABM state is being saved or restored
5410  * @pData: abm hw states
5411  *
5412  * Return: result of the ABM save/restore call, false if it is unavailable
5413  */
5413 bool dc_abm_save_restore(
5414                 struct dc *dc,
5415                 struct dc_stream_state *stream,
5416                 struct abm_save_restore *pData)
5417 {
5418         int i;
5419         int edp_num;
5420         struct pipe_ctx *pipe = NULL;
5421         struct dc_link *link = stream->sink->link;
5422         struct dc_link *edp_links[MAX_NUM_EDP];
5423
5424         if (link->replay_settings.replay_feature_enabled)
5425                 return false;
5426
5427         /*find primary pipe associated with stream*/
5428         for (i = 0; i < MAX_PIPES; i++) {
5429                 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
5430
5431                 if (pipe->stream == stream && pipe->stream_res.tg)
5432                         break;
5433         }
5434
5435         if (i == MAX_PIPES) {
5436                 ASSERT(0);
5437                 return false;
5438         }
5439
5440         dc_get_edp_links(dc, edp_links, &edp_num);
5441
5442         /* Determine panel inst */
5443         for (i = 0; i < edp_num; i++)
5444                 if (edp_links[i] == link)
5445                         break;
5446
5447         if (i == edp_num)
5448                 return false;
5449
5450         if (pipe->stream_res.abm &&
5451                 pipe->stream_res.abm->funcs->save_restore)
5452                 return pipe->stream_res.abm->funcs->save_restore(
5453                                 pipe->stream_res.abm,
5454                                 i,
5455                                 pData);
5456         return false;
5457 }
5458
5459 void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
5460 {
5461         unsigned int i;
5462         bool subvp_sw_cursor_req = false;
5463
5464         for (i = 0; i < dc->current_state->stream_count; i++) {
5465                 if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i])) {
5466                         subvp_sw_cursor_req = true;
5467                         break;
5468                 }
5469         }
5470         properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size;
5471 }
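
/*
 * Example (illustrative sketch): a DM can use the reported limit to fall
 * back to a software cursor when SubVP restricts the hardware cursor size:
 *
 *	struct dc_current_properties props;
 *
 *	dc_query_current_properties(dc, &props);
 *	if (cursor_width > props.cursor_size_limit)
 *		use_software_cursor();	// DM-specific fallback, hypothetical
 */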
5472
5473 /**
5474  * dc_set_edp_power() - DM controls eDP power to be ON/OFF
5475  *
5476  * Called when DM wants to power on/off eDP.
5477  * Only works on links that have the skip_implict_edp_power_control flag set.
5478  *
5479  * @dc: Current DC state
5480  * @edp_link: a link with eDP connector signal type
5481  * @powerOn: true to power the panel on, false to power it off
5482  *
5483  * Return: void
5484  */
5485 void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
5486                                  bool powerOn)
5487 {
5488         if (edp_link->connector_signal != SIGNAL_TYPE_EDP)
5489                 return;
5490
5491         if (!edp_link->skip_implict_edp_power_control)
5492                 return;
5493
5494         edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
5495 }
5496
5497 /**
5498  * dc_get_power_profile_for_dc_state() - extracts power profile from dc state
5499  * @context: dc state to extract the power profile from
5500  *
5501  * Called when DM wants to make power policy decisions based on dc_state.
5502  *
5503  * Return: the power profile for the given state
5504  */
5505 struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context)
5506 {
5507         struct dc_power_profile profile = { 0 };
5508
5509         profile.power_level += !context->bw_ctx.bw.dcn.clk.p_state_change_support;
5510
5511         return profile;
5512 }
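
/*
 * Example (illustrative sketch): with the current encoding, power_level is 0
 * when P-State switching is supported and 1 when it is not, so a DM could
 * compare two candidate states like this:
 *
 *	struct dc_power_profile a = dc_get_power_profile_for_dc_state(ctx_a);
 *	struct dc_power_profile b = dc_get_power_profile_for_dc_state(ctx_b);
 *	bool prefer_a = a.power_level <= b.power_level;	// lower is lower power
 */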
5513