drm/amd/display: Rename dc_stream to dc_stream_state
drivers/gpu/drm/amd/display/dc/core/dc.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  */
24
25 #include "dm_services.h"
26
27 #include "dc.h"
28
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
32
33 #include "resource.h"
34
35 #include "clock_source.h"
36 #include "dc_bios_types.h"
37
38 #include "dce_calcs.h"
39 #include "bios_parser_interface.h"
40 #include "include/irq_service_interface.h"
41 #include "transform.h"
42 #include "timing_generator.h"
43 #include "virtual/virtual_link_encoder.h"
44
45 #include "link_hwss.h"
46 #include "link_encoder.h"
47
48 #include "dc_link_ddc.h"
49 #include "dm_helpers.h"
50 #include "mem_input.h"
51
52 /*******************************************************************************
53  * Private functions
54  ******************************************************************************/
55 static void destroy_links(struct core_dc *dc)
56 {
57         uint32_t i;
58
59         for (i = 0; i < dc->link_count; i++) {
60                 if (dc->links[i])
61                         link_destroy(&dc->links[i]);
62         }
63 }
64
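/*
 * Create one dc_link per physical connector reported by the VBIOS, then
 * append the requested number of virtual links backed by virtual link
 * encoders.
 */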
65 static bool create_links(
66                 struct core_dc *dc,
67                 uint32_t num_virtual_links)
68 {
69         int i;
70         int connectors_num;
71         struct dc_bios *bios = dc->ctx->dc_bios;
72
73         dc->link_count = 0;
74
75         connectors_num = bios->funcs->get_connectors_number(bios);
76
77         if (connectors_num > ENUM_ID_COUNT) {
78                 dm_error(
79                         "DC: Number of connectors %d exceeds maximum of %d!\n",
80                         connectors_num,
81                         ENUM_ID_COUNT);
82                 return false;
83         }
84
85         if (connectors_num == 0 && num_virtual_links == 0) {
86                 dm_error("DC: Number of connectors is zero!\n");
87         }
88
89         dm_output_to_console(
90                 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
91                 __func__,
92                 connectors_num,
93                 num_virtual_links);
94
95         for (i = 0; i < connectors_num; i++) {
96                 struct link_init_data link_init_params = {0};
97                 struct dc_link *link;
98
99                 link_init_params.ctx = dc->ctx;
100                 /* next BIOS object table connector */
101                 link_init_params.connector_index = i;
102                 link_init_params.link_index = dc->link_count;
103                 link_init_params.dc = dc;
104                 link = link_create(&link_init_params);
105
106                 if (link) {
107                         dc->links[dc->link_count] = link;
108                         link->dc = dc;
109                         ++dc->link_count;
110                 }
111         }
112
113         for (i = 0; i < num_virtual_links; i++) {
114                 struct dc_link *link = dm_alloc(sizeof(*link));
115                 struct encoder_init_data enc_init = {0};
116
117                 if (link == NULL) {
118                         BREAK_TO_DEBUGGER();
119                         goto failed_alloc;
120                 }
121
122                 link->ctx = dc->ctx;
123                 link->dc = dc;
124                 link->connector_signal = SIGNAL_TYPE_VIRTUAL;
125                 link->link_id.type = OBJECT_TYPE_CONNECTOR;
126                 link->link_id.id = CONNECTOR_ID_VIRTUAL;
127                 link->link_id.enum_id = ENUM_ID_1;
128                 link->link_enc = dm_alloc(sizeof(*link->link_enc));
129
130                 enc_init.ctx = dc->ctx;
131                 enc_init.channel = CHANNEL_ID_UNKNOWN;
132                 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
133                 enc_init.transmitter = TRANSMITTER_UNKNOWN;
134                 enc_init.connector = link->link_id;
135                 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
136                 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
137                 enc_init.encoder.enum_id = ENUM_ID_1;
138                 virtual_link_encoder_construct(link->link_enc, &enc_init);
139
140                 link->link_index = dc->link_count;
141                 dc->links[dc->link_count] = link;
142                 dc->link_count++;
143         }
144
145         return true;
146
147 failed_alloc:
148         return false;
149 }
150
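/*
 * Adjust the DRR (dynamic refresh rate) v_min/v_max of the pipe driving
 * the given stream and rebuild/update its info frame.
 */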
151 static bool stream_adjust_vmin_vmax(struct dc *dc,
152                 struct dc_stream_state **streams, int num_streams,
153                 int vmin, int vmax)
154 {
155         /* TODO: Support multiple streams */
156         struct core_dc *core_dc = DC_TO_CORE(dc);
157         struct dc_stream_state *stream = streams[0];
158         int i = 0;
159         bool ret = false;
160
161         for (i = 0; i < MAX_PIPES; i++) {
162                 struct pipe_ctx *pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];
163
164                 if (pipe->stream == stream && pipe->stream_enc) {
165                         core_dc->hwss.set_drr(&pipe, 1, vmin, vmax);
166
167                         /* build and update the info frame */
168                         resource_build_info_frame(pipe);
169                         core_dc->hwss.update_info_frame(pipe);
170
171                         ret = true;
172                 }
173         }
174         return ret;
175 }
176
177 static bool stream_get_crtc_position(struct dc *dc,
178                 struct dc_stream_state **streams, int num_streams,
179                 unsigned int *v_pos, unsigned int *nom_v_pos)
180 {
181         /* TODO: Support multiple streams */
182         struct core_dc *core_dc = DC_TO_CORE(dc);
183         struct dc_stream_state *stream = streams[0];
184         int i = 0;
185         bool ret = false;
186         struct crtc_position position;
187
188         for (i = 0; i < MAX_PIPES; i++) {
189                 struct pipe_ctx *pipe =
190                                 &core_dc->current_context->res_ctx.pipe_ctx[i];
191
192                 if (pipe->stream == stream && pipe->stream_enc) {
193                         core_dc->hwss.get_position(&pipe, 1, &position);
194
195                         *v_pos = position.vertical_count;
196                         *nom_v_pos = position.nominal_vcount;
197                         ret = true;
198                 }
199         }
200         return ret;
201 }
202
203 static bool set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
204 {
205         struct core_dc *core_dc = DC_TO_CORE(dc);
206         int i = 0;
207         bool ret = false;
208         struct pipe_ctx *pipes;
209
210         for (i = 0; i < MAX_PIPES; i++) {
211                 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream == stream) {
212                         pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
213                         core_dc->hwss.program_gamut_remap(pipes);
214                         ret = true;
215                 }
216         }
217
218         return ret;
219 }
220
221 static bool program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
222 {
223         struct core_dc *core_dc = DC_TO_CORE(dc);
224         int i = 0;
225         bool ret = false;
226         struct pipe_ctx *pipes;
227
228         for (i = 0; i < MAX_PIPES; i++) {
229                 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
230                                 == stream) {
231
232                         pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
233                         core_dc->hwss.program_csc_matrix(pipes,
234                         stream->output_color_space,
235                         stream->csc_color_matrix.matrix);
236                         ret = true;
237                 }
238         }
239
240         return ret;
241 }
242
243 static void set_static_screen_events(struct dc *dc,
244                 struct dc_stream_state **streams,
245                 int num_streams,
246                 const struct dc_static_screen_events *events)
247 {
248         struct core_dc *core_dc = DC_TO_CORE(dc);
249         int i = 0;
250         int j = 0;
251         struct pipe_ctx *pipes_affected[MAX_PIPES];
252         int num_pipes_affected = 0;
253
254         for (i = 0; i < num_streams; i++) {
255                 struct dc_stream_state *stream = streams[i];
256
257                 for (j = 0; j < MAX_PIPES; j++) {
258                         if (core_dc->current_context->res_ctx.pipe_ctx[j].stream
259                                         == stream) {
260                                 pipes_affected[num_pipes_affected++] =
261                                                 &core_dc->current_context->res_ctx.pipe_ctx[j];
262                         }
263                 }
264         }
265
266         core_dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
267 }
268
269 static void set_drive_settings(struct dc *dc,
270                 struct link_training_settings *lt_settings,
271                 const struct dc_link *link)
272 {
273         struct core_dc *core_dc = DC_TO_CORE(dc);
274         int i;
275
276         for (i = 0; i < core_dc->link_count; i++) {
277                 if (core_dc->links[i] == link)
278                         break;
279         }
280
281         if (i >= core_dc->link_count)
282                 ASSERT_CRITICAL(false);
283
284         dc_link_dp_set_drive_settings(core_dc->links[i], lt_settings);
285 }
286
287 static void perform_link_training(struct dc *dc,
288                 struct dc_link_settings *link_setting,
289                 bool skip_video_pattern)
290 {
291         struct core_dc *core_dc = DC_TO_CORE(dc);
292         int i;
293
294         for (i = 0; i < core_dc->link_count; i++)
295                 dc_link_dp_perform_link_training(
296                         core_dc->links[i],
297                         link_setting,
298                         skip_video_pattern);
299 }
300
301 static void set_preferred_link_settings(struct dc *dc,
302                 struct dc_link_settings *link_setting,
303                 struct dc_link *link)
304 {
305         link->preferred_link_setting = *link_setting;
306         dp_retrain_link_dp_test(link, link_setting, false);
307 }
308
309 static void enable_hpd(const struct dc_link *link)
310 {
311         dc_link_dp_enable_hpd(link);
312 }
313
314 static void disable_hpd(const struct dc_link *link)
315 {
316         dc_link_dp_disable_hpd(link);
317 }
318
319
320 static void set_test_pattern(
321                 struct dc_link *link,
322                 enum dp_test_pattern test_pattern,
323                 const struct link_training_settings *p_link_settings,
324                 const unsigned char *p_custom_pattern,
325                 unsigned int cust_pattern_size)
326 {
327         if (link != NULL)
328                 dc_link_dp_set_test_pattern(
329                         link,
330                         test_pattern,
331                         p_link_settings,
332                         p_custom_pattern,
333                         cust_pattern_size);
334 }
335
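/*
 * Program bit-depth reduction/dithering for the stream's OPP; for
 * DITHER_OPTION_DEFAULT a spatial dither is derived from the stream's
 * display color depth.
 */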
336 void set_dither_option(struct dc_stream_state *stream,
337                 enum dc_dither_option option)
338 {
339         struct bit_depth_reduction_params params;
340         struct dc_link *link;
341         struct pipe_ctx *pipes;
342
343         if (!stream || option > DITHER_OPTION_MAX)
344                 return;
345         link = stream->status.link;
346         pipes = link->dc->current_context->res_ctx.pipe_ctx;
347         memset(&params, 0, sizeof(params));
348         if (option == DITHER_OPTION_DEFAULT) {
349                 switch (stream->timing.display_color_depth) {
350                 case COLOR_DEPTH_666:
351                         stream->dither_option = DITHER_OPTION_SPATIAL6;
352                         break;
353                 case COLOR_DEPTH_888:
354                         stream->dither_option = DITHER_OPTION_SPATIAL8;
355                         break;
356                 case COLOR_DEPTH_101010:
357                         stream->dither_option = DITHER_OPTION_SPATIAL10;
358                         break;
359                 default:
360                         option = DITHER_OPTION_DISABLE;
361                 }
362         } else {
363                 stream->dither_option = option;
364         }
365         resource_build_bit_depth_reduction_params(stream,
366                                 &params);
367         stream->bit_depth_params = params;
368         pipes->opp->funcs->
369                 opp_program_bit_depth_reduction(pipes->opp, &params);
370 }
371
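/* Hook up the public stream/link function pointers to the helpers above. */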
372 static void allocate_dc_stream_funcs(struct core_dc *core_dc)
373 {
374         if (core_dc->hwss.set_drr != NULL) {
375                 core_dc->public.stream_funcs.adjust_vmin_vmax =
376                                 stream_adjust_vmin_vmax;
377         }
378
379         core_dc->public.stream_funcs.set_static_screen_events =
380                         set_static_screen_events;
381
382         core_dc->public.stream_funcs.get_crtc_position =
383                         stream_get_crtc_position;
384
385         core_dc->public.stream_funcs.set_gamut_remap =
386                         set_gamut_remap;
387
388         core_dc->public.stream_funcs.program_csc_matrix =
389                         program_csc_matrix;
390
391         core_dc->public.stream_funcs.set_dither_option =
392                         set_dither_option;
393
394         core_dc->public.link_funcs.set_drive_settings =
395                         set_drive_settings;
396
397         core_dc->public.link_funcs.perform_link_training =
398                         perform_link_training;
399
400         core_dc->public.link_funcs.set_preferred_link_settings =
401                         set_preferred_link_settings;
402
403         core_dc->public.link_funcs.enable_hpd =
404                         enable_hpd;
405
406         core_dc->public.link_funcs.disable_hpd =
407                         disable_hpd;
408
409         core_dc->public.link_funcs.set_test_pattern =
410                         set_test_pattern;
411 }
412
413 static void destruct(struct core_dc *dc)
414 {
415         dc_release_validate_context(dc->current_context);
416         dc->current_context = NULL;
417
418         destroy_links(dc);
419
420         dc_destroy_resource_pool(dc);
421
422         if (dc->ctx->gpio_service)
423                 dal_gpio_service_destroy(&dc->ctx->gpio_service);
424
425         if (dc->ctx->i2caux)
426                 dal_i2caux_destroy(&dc->ctx->i2caux);
427
428         if (dc->ctx->created_bios)
429                 dal_bios_parser_destroy(&dc->ctx->dc_bios);
430
431         if (dc->ctx->logger)
432                 dal_logger_destroy(&dc->ctx->logger);
433
434         dm_free(dc->ctx);
435         dc->ctx = NULL;
436 }
437
438 static bool construct(struct core_dc *dc,
439                 const struct dc_init_data *init_params)
440 {
441         struct dal_logger *logger;
442         struct dc_context *dc_ctx = dm_alloc(sizeof(*dc_ctx));
443         enum dce_version dc_version = DCE_VERSION_UNKNOWN;
444
445         if (!dc_ctx) {
446                 dm_error("%s: failed to create ctx\n", __func__);
447                 goto ctx_fail;
448         }
449
450         dc->current_context = dm_alloc(sizeof(*dc->current_context));
451
452         if (!dc->current_context) {
453                 dm_error("%s: failed to create validate ctx\n", __func__);
454                 goto val_ctx_fail;
455         }
456
457         dc->current_context->ref_count++;
458
459         dc_ctx->cgs_device = init_params->cgs_device;
460         dc_ctx->driver_context = init_params->driver;
461         dc_ctx->dc = &dc->public;
462         dc_ctx->asic_id = init_params->asic_id;
463
464         /* Create logger */
465         logger = dal_logger_create(dc_ctx);
466
467         if (!logger) {
468                 /* can *not* call logger. call base driver 'print error' */
469                 dm_error("%s: failed to create Logger!\n", __func__);
470                 goto logger_fail;
471         }
472         dc_ctx->logger = logger;
473         dc->ctx = dc_ctx;
474         dc->ctx->dce_environment = init_params->dce_environment;
475
476         dc_version = resource_parse_asic_id(init_params->asic_id);
477         dc->ctx->dce_version = dc_version;
478 #ifdef ENABLE_FBC
479         dc->ctx->fbc_gpu_addr = init_params->fbc_gpu_addr;
480 #endif
481         /* Resource should construct all asic specific resources.
482          * This should be the only place where we need to parse the asic id
483          */
484         if (init_params->vbios_override)
485                 dc_ctx->dc_bios = init_params->vbios_override;
486         else {
487                 /* Create BIOS parser */
488                 struct bp_init_data bp_init_data;
489
490                 bp_init_data.ctx = dc_ctx;
491                 bp_init_data.bios = init_params->asic_id.atombios_base_address;
492
493                 dc_ctx->dc_bios = dal_bios_parser_create(
494                                 &bp_init_data, dc_version);
495
496                 if (!dc_ctx->dc_bios) {
497                         ASSERT_CRITICAL(false);
498                         goto bios_fail;
499                 }
500
501                 dc_ctx->created_bios = true;
502         }
503
504         /* Create I2C AUX */
505         dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
506
507         if (!dc_ctx->i2caux) {
508                 ASSERT_CRITICAL(false);
509                 goto failed_to_create_i2caux;
510         }
511
512         /* Create GPIO service */
513         dc_ctx->gpio_service = dal_gpio_service_create(
514                         dc_version,
515                         dc_ctx->dce_environment,
516                         dc_ctx);
517
518         if (!dc_ctx->gpio_service) {
519                 ASSERT_CRITICAL(false);
520                 goto gpio_fail;
521         }
522
523         dc->res_pool = dc_create_resource_pool(
524                         dc,
525                         init_params->num_virtual_links,
526                         dc_version,
527                         init_params->asic_id);
528         if (!dc->res_pool)
529                 goto create_resource_fail;
530
531         if (!create_links(dc, init_params->num_virtual_links))
532                 goto create_links_fail;
533
534         allocate_dc_stream_funcs(dc);
535
536         return true;
537
538         /**** error handling here ****/
539 create_links_fail:
540 create_resource_fail:
541 gpio_fail:
542 failed_to_create_i2caux:
543 bios_fail:
544 logger_fail:
545 val_ctx_fail:
546 ctx_fail:
547         destruct(dc);
548         return false;
549 }
550
551 /*
552 void ProgramPixelDurationV(unsigned int pixelClockInKHz )
553 {
554         fixed31_32 pixel_duration = Fixed31_32(100000000, pixelClockInKHz) * 10;
555         unsigned int pixDurationInPico = round(pixel_duration);
556
557         DPG_PIPE_ARBITRATION_CONTROL1 arb_control;
558
559         arb_control.u32All = ReadReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1);
560         arb_control.bits.PIXEL_DURATION = pixDurationInPico;
561         WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
562
563         arb_control.u32All = ReadReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1);
564         arb_control.bits.PIXEL_DURATION = pixDurationInPico;
565         WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
566
567         WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL2, 0x4000800);
568         WriteReg (mmDPGV0_REPEATER_PROGRAM, 0x11);
569
570         WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL2, 0x4000800);
571         WriteReg (mmDPGV1_REPEATER_PROGRAM, 0x11);
572 }
573 */
574
575 /*******************************************************************************
576  * Public functions
577  ******************************************************************************/
578
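/*
 * Allocate and construct the display core, bring up HW, and derive the
 * stream/link/audio capability limits exposed to the DM layer.
 */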
579 struct dc *dc_create(const struct dc_init_data *init_params)
580 {
581         struct core_dc *core_dc = dm_alloc(sizeof(*core_dc));
582         unsigned int full_pipe_count;
583
584         if (!core_dc)
585                 goto alloc_fail;
586
587         if (!construct(core_dc, init_params))
588                 goto construct_fail;
589
590         /*TODO: separate HW and SW initialization*/
591         core_dc->hwss.init_hw(core_dc);
592
593         full_pipe_count = core_dc->res_pool->pipe_count;
594         if (core_dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
595                 full_pipe_count--;
596         core_dc->public.caps.max_streams = min(
597                         full_pipe_count,
598                         core_dc->res_pool->stream_enc_count);
599
600         core_dc->public.caps.max_links = core_dc->link_count;
601         core_dc->public.caps.max_audios = core_dc->res_pool->audio_count;
602
603         core_dc->public.config = init_params->flags;
604
605         dm_logger_write(core_dc->ctx->logger, LOG_DC,
606                         "Display Core initialized\n");
607
608
609         /* TODO: missing feature to be enabled */
610         core_dc->public.debug.disable_dfs_bypass = true;
611
612         return &core_dc->public;
613
614 construct_fail:
615         dm_free(core_dc);
616
617 alloc_fail:
618         return NULL;
619 }
620
621 void dc_destroy(struct dc **dc)
622 {
623         struct core_dc *core_dc = DC_TO_CORE(*dc);
624         destruct(core_dc);
625         dm_free(core_dc);
626         *dc = NULL;
627 }
628
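/*
 * Check whether the requested validation set differs from the current
 * context (stream count, per-stream surface count, stream or surface
 * state); if not, a full revalidation can be skipped.
 */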
629 static bool is_validation_required(
630                 const struct core_dc *dc,
631                 const struct dc_validation_set set[],
632                 int set_count)
633 {
634         const struct validate_context *context = dc->current_context;
635         int i, j;
636
637         if (context->stream_count != set_count)
638                 return true;
639
640         for (i = 0; i < set_count; i++) {
641
642                 if (set[i].surface_count != context->stream_status[i].surface_count)
643                         return true;
644                 if (!dc_is_stream_unchanged(set[i].stream, context->streams[i]))
645                         return true;
646
647                 for (j = 0; j < set[i].surface_count; j++) {
648                         struct dc_plane_state temp_surf;
649                         memset(&temp_surf, 0, sizeof(temp_surf));
650
651                         temp_surf = *context->stream_status[i].surfaces[j];
652                         temp_surf.clip_rect = set[i].surfaces[j]->clip_rect;
653                         temp_surf.dst_rect.x = set[i].surfaces[j]->dst_rect.x;
654                         temp_surf.dst_rect.y = set[i].surfaces[j]->dst_rect.y;
655
656                         if (memcmp(&temp_surf, set[i].surfaces[j], sizeof(temp_surf)) != 0)
657                                 return true;
658                 }
659         }
660
661         return false;
662 }
663
664 static bool validate_streams(
665                 const struct dc *dc,
666                 const struct dc_validation_set set[],
667                 int set_count)
668 {
669         int i;
670
671         for (i = 0; i < set_count; i++)
672                 if (!dc_validate_stream(dc, set[i].stream))
673                         return false;
674
675         return true;
676 }
677
678 static bool validate_surfaces(
679                 const struct dc *dc,
680                 const struct dc_validation_set set[],
681                 int set_count)
682 {
683         int i, j;
684
685         for (i = 0; i < set_count; i++)
686                 for (j = 0; j < set[i].surface_count; j++)
687                         if (!dc_validate_plane(dc, set[i].surfaces[j]))
688                                 return false;
689
690         return true;
691 }
692
693 struct validate_context *dc_get_validate_context(
694                 const struct dc *dc,
695                 const struct dc_validation_set set[],
696                 uint8_t set_count)
697 {
698         struct core_dc *core_dc = DC_TO_CORE(dc);
699         enum dc_status result = DC_ERROR_UNEXPECTED;
700         struct validate_context *context;
701
702
703         context = dm_alloc(sizeof(struct validate_context));
704         if (context == NULL)
705                 goto context_alloc_fail;
706
707         ++context->ref_count;
708
709         if (!is_validation_required(core_dc, set, set_count)) {
710                 dc_resource_validate_ctx_copy_construct(core_dc->current_context, context);
711                 return context;
712         }
713
714         result = core_dc->res_pool->funcs->validate_with_context(
715                         core_dc, set, set_count, context, core_dc->current_context);
716
717 context_alloc_fail:
718         if (result != DC_OK) {
719                 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
720                                 "%s:resource validation failed, dc_status:%d\n",
721                                 __func__,
722                                 result);
723
724                 if (context)
725                         dc_release_validate_context(context);
726                 context = NULL;
727         }
728
729         return context;
730 }
731
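/*
 * Validate a set of streams and surfaces against a throw-away context
 * without committing anything to HW; returns true if the configuration
 * is supportable.
 */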
732 bool dc_validate_resources(
733                 const struct dc *dc,
734                 const struct dc_validation_set set[],
735                 uint8_t set_count)
736 {
737         struct core_dc *core_dc = DC_TO_CORE(dc);
738         enum dc_status result = DC_ERROR_UNEXPECTED;
739         struct validate_context *context;
740
741         if (!validate_streams(dc, set, set_count))
742                 return false;
743
744         if (!validate_surfaces(dc, set, set_count))
745                 return false;
746
747         context = dm_alloc(sizeof(struct validate_context));
748         if (context == NULL)
749                 goto context_alloc_fail;
750
751         ++context->ref_count;
752
753         result = core_dc->res_pool->funcs->validate_with_context(
754                                 core_dc, set, set_count, context, NULL);
755
756 context_alloc_fail:
757         if (result != DC_OK) {
758                 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
759                                 "%s:resource validation failed, dc_status:%d\n",
760                                 __func__,
761                                 result);
762         }
763
764         if (context)
765                 dc_release_validate_context(context);
766
767         return result == DC_OK;
768 }
769
770 bool dc_validate_guaranteed(
771                 const struct dc *dc,
772                 struct dc_stream_state *stream)
773 {
774         struct core_dc *core_dc = DC_TO_CORE(dc);
775         enum dc_status result = DC_ERROR_UNEXPECTED;
776         struct validate_context *context;
777
778         if (!dc_validate_stream(dc, stream))
779                 return false;
780
781         context = dm_alloc(sizeof(struct validate_context));
782         if (context == NULL)
783                 goto context_alloc_fail;
784
785         ++context->ref_count;
786
787         result = core_dc->res_pool->funcs->validate_guaranteed(
788                                         core_dc, stream, context);
789
790         dc_release_validate_context(context);
791
792 context_alloc_fail:
793         if (result != DC_OK) {
794                 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
795                         "%s:guaranteed validation failed, dc_status:%d\n",
796                         __func__,
797                         result);
798         }
799
800         return (result == DC_OK);
801 }
802
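/*
 * Group top-level pipes whose stream timings are synchronizable, choose an
 * unblanked pipe as master of each group and enable timing synchronization
 * for every group with more than one member.
 */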
803 static void program_timing_sync(
804                 struct core_dc *core_dc,
805                 struct validate_context *ctx)
806 {
807         int i, j;
808         int group_index = 0;
809         int pipe_count = core_dc->res_pool->pipe_count;
810         struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
811
812         for (i = 0; i < pipe_count; i++) {
813                 if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
814                         continue;
815
816                 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
817         }
818
819         for (i = 0; i < pipe_count; i++) {
820                 int group_size = 1;
821                 struct pipe_ctx *pipe_set[MAX_PIPES];
822
823                 if (!unsynced_pipes[i])
824                         continue;
825
826                 pipe_set[0] = unsynced_pipes[i];
827                 unsynced_pipes[i] = NULL;
828
829                 /* Add tg to the set, search rest of the tg's for ones with
830                  * same timing, add all tgs with same timing to the group
831                  */
832                 for (j = i + 1; j < pipe_count; j++) {
833                         if (!unsynced_pipes[j])
834                                 continue;
835
836                         if (resource_are_streams_timing_synchronizable(
837                                         unsynced_pipes[j]->stream,
838                                         pipe_set[0]->stream)) {
839                                 pipe_set[group_size] = unsynced_pipes[j];
840                                 unsynced_pipes[j] = NULL;
841                                 group_size++;
842                         }
843                 }
844
845                 /* set first unblanked pipe as master */
846                 for (j = 0; j < group_size; j++) {
847                         struct pipe_ctx *temp;
848
849                         if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
850                                 if (j == 0)
851                                         break;
852
853                                 temp = pipe_set[0];
854                                 pipe_set[0] = pipe_set[j];
855                                 pipe_set[j] = temp;
856                                 break;
857                         }
858                 }
859
860                 /* remove any other unblanked pipes as they have already been synced */
861                 for (j = j + 1; j < group_size; j++) {
862                         if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
863                                 group_size--;
864                                 pipe_set[j] = pipe_set[group_size];
865                                 j--;
866                         }
867                 }
868
869                 if (group_size > 1) {
870                         core_dc->hwss.enable_timing_synchronization(
871                                 core_dc, group_index, group_size, pipe_set);
872                         group_index++;
873                 }
874         }
875 }
876
877 static bool context_changed(
878                 struct core_dc *dc,
879                 struct validate_context *context)
880 {
881         uint8_t i;
882
883         if (context->stream_count != dc->current_context->stream_count)
884                 return true;
885
886         for (i = 0; i < dc->current_context->stream_count; i++) {
887                 if (dc->current_context->streams[i] != context->streams[i])
888                         return true;
889         }
890
891         return false;
892 }
893
894 static bool streams_changed(
895                 struct core_dc *dc,
896                 struct dc_stream_state *streams[],
897                 uint8_t stream_count)
898 {
899         uint8_t i;
900
901         if (stream_count != dc->current_context->stream_count)
902                 return true;
903
904         for (i = 0; i < dc->current_context->stream_count; i++) {
905                 if (dc->current_context->streams[i] != streams[i])
906                         return true;
907         }
908
909         return false;
910 }
911
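/*
 * Run the stereo setup sequence for every pipe that drives one of the given
 * streams, using the supplied context or the current one if none is given.
 */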
912 bool dc_enable_stereo(
913         struct dc *dc,
914         struct validate_context *context,
915         struct dc_stream_state *streams[],
916         uint8_t stream_count)
917 {
918         bool ret = true;
919         int i, j;
920         struct pipe_ctx *pipe;
921         struct core_dc *core_dc = DC_TO_CORE(dc);
922
923 #ifdef ENABLE_FBC
924         struct compressor *fbc_compressor = core_dc->fbc_compressor;
925 #endif
926
927         for (i = 0; i < MAX_PIPES; i++) {
928                 if (context != NULL)
929                         pipe = &context->res_ctx.pipe_ctx[i];
930                 else
931                         pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];
932                 for (j = 0; pipe && j < stream_count; j++) {
933                         if (streams[j] && streams[j] == pipe->stream &&
934                                 core_dc->hwss.setup_stereo)
935                                 core_dc->hwss.setup_stereo(pipe, core_dc);
936                 }
937         }
938
939 #ifdef ENABLE_FBC
940         if (fbc_compressor != NULL &&
941             fbc_compressor->funcs->is_fbc_enabled_in_hw(core_dc->fbc_compressor,
942                                                         NULL))
943                 fbc_compressor->funcs->disable_fbc(fbc_compressor);
944
945 #endif
946         return ret;
947 }
948
949
950 /*
951  * Applies the given context to HW and copies it into the current context.
952  * It's up to the user to release the src context afterwards.
953  */
954 static bool dc_commit_context_no_check(struct dc *dc, struct validate_context *context)
955 {
956         struct core_dc *core_dc = DC_TO_CORE(dc);
957         struct dc_bios *dcb = core_dc->ctx->dc_bios;
958         enum dc_status result = DC_ERROR_UNEXPECTED;
959         struct pipe_ctx *pipe;
960         int i, j, k, l;
961         struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
962
963         for (i = 0; i < context->stream_count; i++)
964                 dc_streams[i] =  context->streams[i];
965
966         if (!dcb->funcs->is_accelerated_mode(dcb))
967                 core_dc->hwss.enable_accelerated_mode(core_dc);
968
969         for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
970                 pipe = &context->res_ctx.pipe_ctx[i];
971                 core_dc->hwss.wait_for_mpcc_disconnect(core_dc, core_dc->res_pool, pipe);
972         }
973         result = core_dc->hwss.apply_ctx_to_hw(core_dc, context);
974
975         program_timing_sync(core_dc, context);
976
977         for (i = 0; i < context->stream_count; i++) {
978                 const struct dc_sink *sink = context->streams[i]->sink;
979
980                 for (j = 0; j < context->stream_status[i].surface_count; j++) {
981                         const struct dc_plane_state *surface =
982                                         context->stream_status[i].surfaces[j];
983
984                         core_dc->hwss.apply_ctx_for_surface(core_dc, surface, context);
985
986                         /*
987                          * enable stereo
988                          * TODO rework dc_enable_stereo call to work with validation sets?
989                          */
990                         for (k = 0; k < MAX_PIPES; k++) {
991                                 pipe = &context->res_ctx.pipe_ctx[k];
992
993                                 for (l = 0; pipe && l < context->stream_count; l++) {
994                                         if (context->streams[l] &&
995                                             context->streams[l] == pipe->stream &&
996                                             core_dc->hwss.setup_stereo)
997                                                 core_dc->hwss.setup_stereo(pipe, core_dc);
998                                 }
999                         }
1000                 }
1001
1002                 CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
1003                                 context->streams[i]->timing.h_addressable,
1004                                 context->streams[i]->timing.v_addressable,
1005                                 context->streams[i]->timing.h_total,
1006                                 context->streams[i]->timing.v_total,
1007                                 context->streams[i]->timing.pix_clk_khz);
1008         }
1009
1010         dc_enable_stereo(dc, context, dc_streams, context->stream_count);
1011
1012         dc_release_validate_context(core_dc->current_context);
1013
1014         core_dc->current_context = context;
1015
1016         dc_retain_validate_context(core_dc->current_context);
1017
1018         return (result == DC_OK);
1019 }
1020
1021 bool dc_commit_context(struct dc *dc, struct validate_context *context)
1022 {
1023         enum dc_status result = DC_ERROR_UNEXPECTED;
1024         struct core_dc *core_dc = DC_TO_CORE(dc);
1025         int i;
1026
1027         if (!context_changed(core_dc, context))
1028                 return true;
1029
1030         dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n",
1031                                 __func__, context->stream_count);
1032
1033         for (i = 0; i < context->stream_count; i++) {
1034                 struct dc_stream_state *stream = context->streams[i];
1035
1036                 dc_stream_log(stream,
1037                                 core_dc->ctx->logger,
1038                                 LOG_DC);
1039         }
1040
1041         result = dc_commit_context_no_check(dc, context);
1042
1043         return (result == DC_OK);
1044 }
1045
1046
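/*
 * Build a validation set from the given streams (reusing the surfaces
 * currently attached to each stream), validate it against the current
 * context and commit the resulting context to HW.
 */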
1047 bool dc_commit_streams(
1048         struct dc *dc,
1049         struct dc_stream_state *streams[],
1050         uint8_t stream_count)
1051 {
1052         struct core_dc *core_dc = DC_TO_CORE(dc);
1053         enum dc_status result = DC_ERROR_UNEXPECTED;
1054         struct validate_context *context;
1055         struct dc_validation_set set[MAX_STREAMS] = { {0, {0} } };
1056         int i;
1057
1058         if (!streams_changed(core_dc, streams, stream_count))
1059                 return true;
1060
1061         dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n",
1062                                 __func__, stream_count);
1063
1064         for (i = 0; i < stream_count; i++) {
1065                 struct dc_stream_state *stream = streams[i];
1066                 struct dc_stream_status *status = dc_stream_get_status(stream);
1067                 int j;
1068
1069                 dc_stream_log(stream,
1070                                 core_dc->ctx->logger,
1071                                 LOG_DC);
1072
1073                 set[i].stream = stream;
1074
1075                 if (status) {
1076                         set[i].surface_count = status->surface_count;
1077                         for (j = 0; j < status->surface_count; j++)
1078                                 set[i].surfaces[j] = status->surfaces[j];
1079                 }
1080
1081         }
1082
1083         if (!validate_streams(dc, set, stream_count))
1084                 return false;
1085
1086         if (!validate_surfaces(dc, set, stream_count))
1087                 return false;
1088
1089         context = dm_alloc(sizeof(struct validate_context));
1090         if (context == NULL)
1091                 goto context_alloc_fail;
1092
1093         ++context->ref_count;
1094
1095         result = core_dc->res_pool->funcs->validate_with_context(
1096                         core_dc, set, stream_count, context, core_dc->current_context);
1097         if (result != DC_OK) {
1098                 dm_logger_write(core_dc->ctx->logger, LOG_ERROR,
1099                                         "%s: Context validation failed! dc_status:%d\n",
1100                                         __func__,
1101                                         result);
1102                 BREAK_TO_DEBUGGER();
1103                 goto fail;
1104         }
1105
1106         result = dc_commit_context_no_check(dc, context);
1107
1108 fail:
1109         dc_release_validate_context(context);
1110
1111 context_alloc_fail:
1112         return (result == DC_OK);
1113 }
1114
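/*
 * After surface updates have been committed, power down front ends that no
 * longer drive a stream/surface and reprogram bandwidth.
 */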
1115 bool dc_post_update_surfaces_to_stream(struct dc *dc)
1116 {
1117         int i;
1118         struct core_dc *core_dc = DC_TO_CORE(dc);
1119         struct validate_context *context = core_dc->current_context;
1120
1121         post_surface_trace(dc);
1122
1123         for (i = 0; i < core_dc->res_pool->pipe_count; i++)
1124                 if (context->res_ctx.pipe_ctx[i].stream == NULL
1125                                 || context->res_ctx.pipe_ctx[i].surface == NULL)
1126                         core_dc->hwss.power_down_front_end(core_dc, i);
1127
1128         /* 3rd param should be true, temp w/a for RV*/
1129 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1130         core_dc->hwss.set_bandwidth(core_dc, context, core_dc->ctx->dce_version < DCN_VERSION_1_0);
1131 #else
1132         core_dc->hwss.set_bandwidth(core_dc, context, true);
1133 #endif
1134         return true;
1135 }
1136
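/*
 * Convenience wrapper: convert full surface states into dc_surface_update/
 * dc_stream_update structures and commit them via
 * dc_update_surfaces_and_stream().
 */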
1137 bool dc_commit_surfaces_to_stream(
1138                 struct dc *dc,
1139                 struct dc_plane_state **new_surfaces,
1140                 uint8_t new_surface_count,
1141                 struct dc_stream_state *dc_stream)
1142 {
1143         struct dc_surface_update updates[MAX_SURFACES];
1144         struct dc_flip_addrs flip_addr[MAX_SURFACES];
1145         struct dc_plane_info plane_info[MAX_SURFACES];
1146         struct dc_scaling_info scaling_info[MAX_SURFACES];
1147         int i;
1148         struct dc_stream_update *stream_update =
1149                         dm_alloc(sizeof(struct dc_stream_update));
1150
1151         if (!stream_update) {
1152                 BREAK_TO_DEBUGGER();
1153                 return false;
1154         }
1155
1156         memset(updates, 0, sizeof(updates));
1157         memset(flip_addr, 0, sizeof(flip_addr));
1158         memset(plane_info, 0, sizeof(plane_info));
1159         memset(scaling_info, 0, sizeof(scaling_info));
1160
1161         stream_update->src = dc_stream->src;
1162         stream_update->dst = dc_stream->dst;
1163         stream_update->out_transfer_func = dc_stream->out_transfer_func;
1164
1165         for (i = 0; i < new_surface_count; i++) {
1166                 updates[i].surface = new_surfaces[i];
1167                 updates[i].gamma =
1168                         (struct dc_gamma *)new_surfaces[i]->gamma_correction;
1169                 updates[i].in_transfer_func = new_surfaces[i]->in_transfer_func;
1170                 flip_addr[i].address = new_surfaces[i]->address;
1171                 flip_addr[i].flip_immediate = new_surfaces[i]->flip_immediate;
1172                 plane_info[i].color_space = new_surfaces[i]->color_space;
1173                 plane_info[i].format = new_surfaces[i]->format;
1174                 plane_info[i].plane_size = new_surfaces[i]->plane_size;
1175                 plane_info[i].rotation = new_surfaces[i]->rotation;
1176                 plane_info[i].horizontal_mirror = new_surfaces[i]->horizontal_mirror;
1177                 plane_info[i].stereo_format = new_surfaces[i]->stereo_format;
1178                 plane_info[i].tiling_info = new_surfaces[i]->tiling_info;
1179                 plane_info[i].visible = new_surfaces[i]->visible;
1180                 plane_info[i].per_pixel_alpha = new_surfaces[i]->per_pixel_alpha;
1181                 plane_info[i].dcc = new_surfaces[i]->dcc;
1182                 scaling_info[i].scaling_quality = new_surfaces[i]->scaling_quality;
1183                 scaling_info[i].src_rect = new_surfaces[i]->src_rect;
1184                 scaling_info[i].dst_rect = new_surfaces[i]->dst_rect;
1185                 scaling_info[i].clip_rect = new_surfaces[i]->clip_rect;
1186
1187                 updates[i].flip_addr = &flip_addr[i];
1188                 updates[i].plane_info = &plane_info[i];
1189                 updates[i].scaling_info = &scaling_info[i];
1190         }
1191
1192         dc_update_surfaces_and_stream(
1193                         dc,
1194                         updates,
1195                         new_surface_count,
1196                         dc_stream, stream_update);
1197
1198         dc_post_update_surfaces_to_stream(dc);
1199
1200         dm_free(stream_update);
1201         return true;
1202 }
1203
1204 void dc_retain_validate_context(struct validate_context *context)
1205 {
1206         ASSERT(context->ref_count > 0);
1207         ++context->ref_count;
1208 }
1209
1210 void dc_release_validate_context(struct validate_context *context)
1211 {
1212         ASSERT(context->ref_count > 0);
1213         --context->ref_count;
1214
1215         if (context->ref_count == 0) {
1216                 dc_resource_validate_ctx_destruct(context);
1217                 dm_free(context);
1218         }
1219 }
1220
1221 static bool is_surface_in_context(
1222                 const struct validate_context *context,
1223                 const struct dc_plane_state *surface)
1224 {
1225         int j;
1226
1227         for (j = 0; j < MAX_PIPES; j++) {
1228                 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1229
1230                 if (surface == pipe_ctx->surface) {
1231                         return true;
1232                 }
1233         }
1234
1235         return false;
1236 }
1237
1238 static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
1239 {
1240         switch (format) {
1241         case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
1242         case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
1243                 return 12;
1244         case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
1245         case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
1246         case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
1247         case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
1248                 return 16;
1249         case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
1250         case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
1251         case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
1252         case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
1253                 return 32;
1254         case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
1255         case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
1256         case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
1257                 return 64;
1258         default:
1259                 ASSERT_CRITICAL(false);
1260                 return -1;
1261         }
1262 }
1263
1264 static enum surface_update_type get_plane_info_update_type(
1265                 const struct dc_surface_update *u,
1266                 int surface_index)
1267 {
1268         struct dc_plane_info temp_plane_info;
1269         memset(&temp_plane_info, 0, sizeof(temp_plane_info));
1270
1271         if (!u->plane_info)
1272                 return UPDATE_TYPE_FAST;
1273
1274         temp_plane_info = *u->plane_info;
1275
1276         /* Copy all parameters that will cause a full update
1277          * from current surface, the rest of the parameters
1278          * from provided plane configuration.
1279          * Perform memory compare and special validation
1280          * for those that can cause fast/medium updates
1281          */
1282
1283         /* Full update parameters */
1284         temp_plane_info.color_space = u->surface->color_space;
1285         temp_plane_info.dcc = u->surface->dcc;
1286         temp_plane_info.horizontal_mirror = u->surface->horizontal_mirror;
1287         temp_plane_info.plane_size = u->surface->plane_size;
1288         temp_plane_info.rotation = u->surface->rotation;
1289         temp_plane_info.stereo_format = u->surface->stereo_format;
1290         temp_plane_info.tiling_info = u->surface->tiling_info;
1291
1292         if (surface_index == 0)
1293                 temp_plane_info.visible = u->plane_info->visible;
1294         else
1295                 temp_plane_info.visible = u->surface->visible;
1296
1297         if (memcmp(u->plane_info, &temp_plane_info,
1298                         sizeof(struct dc_plane_info)) != 0)
1299                 return UPDATE_TYPE_FULL;
1300
1301         if (pixel_format_to_bpp(u->plane_info->format) !=
1302                         pixel_format_to_bpp(u->surface->format)) {
1303                 return UPDATE_TYPE_FULL;
1304         } else {
1305                 return UPDATE_TYPE_MED;
1306         }
1307 }
1308
1309 static enum surface_update_type get_scaling_info_update_type(
1310                 const struct dc_surface_update *u)
1311 {
1312         if (!u->scaling_info)
1313                 return UPDATE_TYPE_FAST;
1314
1315         if (u->scaling_info->src_rect.width != u->surface->src_rect.width
1316                         || u->scaling_info->src_rect.height != u->surface->src_rect.height
1317                         || u->scaling_info->clip_rect.width != u->surface->clip_rect.width
1318                         || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
1319                         || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
1320                         || u->scaling_info->dst_rect.height != u->surface->dst_rect.height)
1321                 return UPDATE_TYPE_FULL;
1322
1323         if (u->scaling_info->src_rect.x != u->surface->src_rect.x
1324                         || u->scaling_info->src_rect.y != u->surface->src_rect.y
1325                         || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
1326                         || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
1327                         || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
1328                         || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
1329                 return UPDATE_TYPE_MED;
1330
1331         return UPDATE_TYPE_FAST;
1332 }
1333
1334 static enum surface_update_type det_surface_update(
1335                 const struct core_dc *dc,
1336                 const struct dc_surface_update *u,
1337                 int surface_index)
1338 {
1339         const struct validate_context *context = dc->current_context;
1340         enum surface_update_type type = UPDATE_TYPE_FAST;
1341         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1342
1343         if (!is_surface_in_context(context, u->surface))
1344                 return UPDATE_TYPE_FULL;
1345
1346         type = get_plane_info_update_type(u, surface_index);
1347         if (overall_type < type)
1348                 overall_type = type;
1349
1350         type = get_scaling_info_update_type(u);
1351         if (overall_type < type)
1352                 overall_type = type;
1353
1354         if (u->in_transfer_func ||
1355                 u->hdr_static_metadata) {
1356                 if (overall_type < UPDATE_TYPE_MED)
1357                         overall_type = UPDATE_TYPE_MED;
1358         }
1359
1360         return overall_type;
1361 }
1362
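/*
 * Determine the most invasive update type (FAST, MED or FULL) needed to
 * apply the given surface and stream updates to a committed stream.
 */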
1363 enum surface_update_type dc_check_update_surfaces_for_stream(
1364                 struct dc *dc,
1365                 struct dc_surface_update *updates,
1366                 int surface_count,
1367                 struct dc_stream_update *stream_update,
1368                 const struct dc_stream_status *stream_status)
1369 {
1370         struct core_dc *core_dc = DC_TO_CORE(dc);
1371         int i;
1372         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1373
1374         if (stream_status == NULL || stream_status->surface_count != surface_count)
1375                 return UPDATE_TYPE_FULL;
1376
1377         if (stream_update)
1378                 return UPDATE_TYPE_FULL;
1379
1380         for (i = 0 ; i < surface_count; i++) {
1381                 enum surface_update_type type =
1382                                 det_surface_update(core_dc, &updates[i], i);
1383
1384                 if (type == UPDATE_TYPE_FULL)
1385                         return type;
1386
1387                 if (overall_type < type)
1388                         overall_type = type;
1389         }
1390
1391         return overall_type;
1392 }
1393
1394 enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
1395
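/*
 * Apply a set of surface updates (and an optional stream update) to the
 * given stream: classify the update type, build a new validate_context for
 * full updates, and copy the update parameters into the surface state.
 */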
1396 void dc_update_surfaces_and_stream(struct dc *dc,
1397                 struct dc_surface_update *srf_updates, int surface_count,
1398                 struct dc_stream_state *stream,
1399                 struct dc_stream_update *stream_update)
1400 {
1401         struct core_dc *core_dc = DC_TO_CORE(dc);
1402         struct validate_context *context;
1403         int i, j;
1404         enum surface_update_type update_type;
1405         const struct dc_stream_status *stream_status;
1406         struct dc_context *dc_ctx = core_dc->ctx;
1407
1408         /* Currently this function does not result in any HW programming
1409          * when called with 0 surfaces. But proceeding would still update
1410          * SW state in validate_context, so we might as well make it do
1411          * nothing at all until the HW programming properly handles the
1412          * 0-surface case.
1413          * TODO: fix hw programming then remove this early return
1414          */
1415         if (surface_count == 0)
1416                 return;
1417
1418         stream_status = dc_stream_get_status(stream);
1419
1420         ASSERT(stream_status);
1421         if (!stream_status)
1422                 return; /* Cannot commit surface to stream that is not committed */
1423
1424 #ifdef ENABLE_FBC
1425         if (srf_updates->flip_addr) {
1426                 if (srf_updates->flip_addr->address.grph.addr.low_part == 0)
1427                         ASSERT(0);
1428         }
1429 #endif
1430         context = core_dc->current_context;
1431
1432         /* update current stream with the new updates */
1433         if (stream_update) {
1434                 if ((stream_update->src.height != 0) &&
1435                                 (stream_update->src.width != 0))
1436                         stream->src = stream_update->src;
1437
1438                 if ((stream_update->dst.height != 0) &&
1439                                 (stream_update->dst.width != 0))
1440                         stream->dst = stream_update->dst;
1441
1442                 if (stream_update->out_transfer_func &&
1443                                 stream_update->out_transfer_func !=
1444                                                 stream->out_transfer_func) {
1445                         if (stream->out_transfer_func != NULL)
1446                                 dc_transfer_func_release(stream->out_transfer_func);
1447                         dc_transfer_func_retain(stream_update->out_transfer_func);
1448                         stream->out_transfer_func =
1449                                 stream_update->out_transfer_func;
1450                 }
1451         }
1452
1453         /* do not perform surface update if surface has invalid dimensions
1454          * (all zero) and no scaling_info is provided
1455          */
1456         if (surface_count > 0 &&
1457                         srf_updates->surface->src_rect.width == 0 &&
1458                         srf_updates->surface->src_rect.height == 0 &&
1459                         srf_updates->surface->dst_rect.width == 0 &&
1460                         srf_updates->surface->dst_rect.height == 0 &&
1461                         !srf_updates->scaling_info) {
1462                 ASSERT(false);
1463                 return;
1464         }
1465
1466         update_type = dc_check_update_surfaces_for_stream(
1467                         dc, srf_updates, surface_count, stream_update, stream_status);
1468
1469         if (update_type >= update_surface_trace_level)
1470                 update_surface_trace(dc, srf_updates, surface_count);
1471
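        /* A full update needs a fresh validate_context: copy the current
         * context and re-attach the new surface set to the stream.
         */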
1472         if (update_type >= UPDATE_TYPE_FULL) {
1473                 struct dc_plane_state *new_surfaces[MAX_SURFACES] = {0};
1474
1475                 for (i = 0; i < surface_count; i++)
1476                         new_surfaces[i] = srf_updates[i].surface;
1477
1478                 /* initialize scratch memory for building context */
1479                 context = dm_alloc(sizeof(*context));
1480                 if (context == NULL)
1481                         goto context_alloc_fail;
1482
1483                 ++context->ref_count;
1484
1485                 dc_resource_validate_ctx_copy_construct(
1486                                 core_dc->current_context, context);
1487
1488                 /* add surface to context */
1489                 if (!resource_attach_surfaces_to_context(
1490                                 new_surfaces, surface_count, stream,
1491                                 context, core_dc->res_pool)) {
1492                         BREAK_TO_DEBUGGER();
1493                         goto fail;
1494                 }
1495         }
1496
1497         /* save update parameters into surface */
1498         for (i = 0; i < surface_count; i++) {
1499                 struct dc_plane_state *surface = srf_updates[i].surface;
1500
1501                 if (srf_updates[i].flip_addr) {
1502                         surface->address = srf_updates[i].flip_addr->address;
1503                         surface->flip_immediate =
1504                                         srf_updates[i].flip_addr->flip_immediate;
1505                 }
1506
1507                 if (srf_updates[i].scaling_info) {
1508                         surface->scaling_quality =
1509                                         srf_updates[i].scaling_info->scaling_quality;
1510                         surface->dst_rect =
1511                                         srf_updates[i].scaling_info->dst_rect;
1512                         surface->src_rect =
1513                                         srf_updates[i].scaling_info->src_rect;
1514                         surface->clip_rect =
1515                                         srf_updates[i].scaling_info->clip_rect;
1516                 }
1517
1518                 if (srf_updates[i].plane_info) {
1519                         surface->color_space =
1520                                         srf_updates[i].plane_info->color_space;
1521                         surface->format =
1522                                         srf_updates[i].plane_info->format;
1523                         surface->plane_size =
1524                                         srf_updates[i].plane_info->plane_size;
1525                         surface->rotation =
1526                                         srf_updates[i].plane_info->rotation;
1527                         surface->horizontal_mirror =
1528                                         srf_updates[i].plane_info->horizontal_mirror;
1529                         surface->stereo_format =
1530                                         srf_updates[i].plane_info->stereo_format;
1531                         surface->tiling_info =
1532                                         srf_updates[i].plane_info->tiling_info;
1533                         surface->visible =
1534                                         srf_updates[i].plane_info->visible;
1535                         surface->per_pixel_alpha =
1536                                         srf_updates[i].plane_info->per_pixel_alpha;
1537                         surface->dcc =
1538                                         srf_updates[i].plane_info->dcc;
1539                 }
1540
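                /* Medium and full updates may change scaling, so rebuild the
                 * per-pipe scaling parameters for every pipe using this surface.
                 */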
1541                 if (update_type >= UPDATE_TYPE_MED) {
1542                         for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1543                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1544
1545                                 if (pipe_ctx->surface != surface)
1546                                         continue;
1547
1548                                 resource_build_scaling_params(pipe_ctx);
1549                         }
1550                 }
1551
1552                 if (srf_updates[i].gamma &&
1553                         srf_updates[i].gamma != surface->gamma_correction) {
1554                         if (surface->gamma_correction != NULL)
1555                                 dc_gamma_release(&surface->gamma_correction);
1556
1557                         dc_gamma_retain(srf_updates[i].gamma);
1558                         surface->gamma_correction = srf_updates[i].gamma;
1559                 }
1560
1561                 if (srf_updates[i].in_transfer_func &&
1562                         srf_updates[i].in_transfer_func != surface->in_transfer_func) {
1563                         if (surface->in_transfer_func != NULL)
1564                                 dc_transfer_func_release(
1565                                                 surface->
1566                                                 in_transfer_func);
1567
1568                         dc_transfer_func_retain(
1569                                         srf_updates[i].in_transfer_func);
1570                         surface->in_transfer_func =
1571                                         srf_updates[i].in_transfer_func;
1572                 }
1573
1574                 if (srf_updates[i].hdr_static_metadata)
1575                         surface->hdr_static_ctx =
1576                                 *(srf_updates[i].hdr_static_metadata);
1577         }
1578
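        /* A full update can change bandwidth requirements: re-validate the new
         * context and program the required clocks before the pipes are touched.
         */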
1579         if (update_type == UPDATE_TYPE_FULL) {
1580                 if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
1581                         BREAK_TO_DEBUGGER();
1582                         goto fail;
1583                 } else {
1584                         core_dc->hwss.set_bandwidth(core_dc, context, false);
1585                         context_clock_trace(dc, context);
1586                 }
1587         }
1588
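        /* For anything beyond a fast update, wait for pending MPCC disconnects
         * to finish before reprogramming the pipes.
         */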
1589         if (update_type > UPDATE_TYPE_FAST) {
1590                 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1591                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1592
1593                         core_dc->hwss.wait_for_mpcc_disconnect(core_dc, core_dc->res_pool, pipe_ctx);
1594                 }
1595         }
1596
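        /* Note: with the zero-surface early return at the top of this function,
         * this branch is currently unreachable; it becomes relevant once that
         * TODO is resolved.
         */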
1597         if (surface_count == 0)
1598                 core_dc->hwss.apply_ctx_for_surface(core_dc, NULL, context);
1599
1600         /* Lock pipes for the provided surfaces, or all active pipes on a full update */
1601         for (i = 0; i < surface_count; i++) {
1602                 struct dc_plane_state *surface = srf_updates[i].surface;
1603
1604                 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1605                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1606
1607                         if (update_type != UPDATE_TYPE_FULL && pipe_ctx->surface != surface)
1608                                 continue;
1609                         if (!pipe_ctx->surface || pipe_ctx->top_pipe)
1610                                 continue;
1611
1612                         core_dc->hwss.pipe_control_lock(
1613                                         core_dc,
1614                                         pipe_ctx,
1615                                         true);
1616                 }
1617                 if (update_type == UPDATE_TYPE_FULL)
1618                         break;
1619         }
1620
1621         /* Full front-end (FE) update */
1622         for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1623                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1624                 struct pipe_ctx *cur_pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1625                 bool is_new_pipe_surface = cur_pipe_ctx->surface != pipe_ctx->surface;
1626                 struct dc_cursor_position position = { 0 };
1627
1628                 if (update_type != UPDATE_TYPE_FULL || !pipe_ctx->surface)
1629                         continue;
1630
1631                 if (!pipe_ctx->top_pipe)
1632                         core_dc->hwss.apply_ctx_for_surface(
1633                                         core_dc, pipe_ctx->surface, context);
1634
1635                 /* TODO: this is a hack/workaround for switching from MPO to pipe split */
1636                 dc_stream_set_cursor_position(pipe_ctx->stream, &position);
1637
1638                 if (is_new_pipe_surface) {
1639                         core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1640                         core_dc->hwss.set_input_transfer_func(
1641                                         pipe_ctx, pipe_ctx->surface);
1642                         core_dc->hwss.set_output_transfer_func(
1643                                         pipe_ctx, pipe_ctx->stream);
1644                 }
1645         }
1646
1647         if (update_type > UPDATE_TYPE_FAST)
1648                 context_timing_trace(dc, &context->res_ctx);
1649
1650         /* Perform the requested updates */
1651         for (i = 0; i < surface_count; i++) {
1652                 struct dc_plane_state *surface = srf_updates[i].surface;
1653
1654                 if (update_type == UPDATE_TYPE_MED)
1655                         core_dc->hwss.apply_ctx_for_surface(
1656                                         core_dc, surface, context);
1657
1658                 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1659                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1660
1661                         if (pipe_ctx->surface != surface)
1662                                 continue;
1663
1664                         if (srf_updates[i].flip_addr)
1665                                 core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1666
1667                         if (update_type == UPDATE_TYPE_FAST)
1668                                 continue;
1669
1670                         if (srf_updates[i].in_transfer_func)
1671                                 core_dc->hwss.set_input_transfer_func(
1672                                                 pipe_ctx, pipe_ctx->surface);
1673
1674                         if (stream_update != NULL &&
1675                                         stream_update->out_transfer_func != NULL) {
1676                                 core_dc->hwss.set_output_transfer_func(
1677                                                 pipe_ctx, pipe_ctx->stream);
1678                         }
1679
1680                         if (srf_updates[i].hdr_static_metadata) {
1681                                 resource_build_info_frame(pipe_ctx);
1682                                 core_dc->hwss.update_info_frame(pipe_ctx);
1683                         }
1684                 }
1685         }
1686
1687         /* Unlock pipes */
1688         for (i = core_dc->res_pool->pipe_count - 1; i >= 0; i--) {
1689                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1690
1691                 for (j = 0; j < surface_count; j++) {
1692                         if (update_type != UPDATE_TYPE_FULL &&
1693                             srf_updates[j].surface != pipe_ctx->surface)
1694                                 continue;
1695                         if (!pipe_ctx->surface || pipe_ctx->top_pipe)
1696                                 continue;
1697
1698                         core_dc->hwss.pipe_control_lock(
1699                                         core_dc,
1700                                         pipe_ctx,
1701                                         false);
1702
1703                         break;
1704                 }
1705         }
1706
1707         if (core_dc->current_context != context) {
1708                 dc_release_validate_context(core_dc->current_context);
1709                 core_dc->current_context = context;
1710         }
1711         return;
1712
1713 fail:
1714         dc_release_validate_context(context);
1715
1716 context_alloc_fail:
1717         DC_ERROR("Failed to allocate new validate context!\n");
1718 }
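
/* Illustrative usage sketch (not part of the driver): a DM-style page flip
 * through this interface could look roughly like the following, assuming
 * 'surface' and 'stream' were created and committed earlier, 'new_addr' is
 * the new scanout address, and the flip-address wrapper type name is as
 * assumed below:
 *
 *      struct dc_flip_addrs flip = { 0 };
 *      struct dc_surface_update update = { 0 };
 *
 *      flip.address = new_addr;
 *      flip.flip_immediate = false;
 *      update.surface = surface;
 *      update.flip_addr = &flip;
 *
 *      dc_update_surfaces_and_stream(dc, &update, 1, stream, NULL);
 */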
1719
1720 uint8_t dc_get_current_stream_count(const struct dc *dc)
1721 {
1722         struct core_dc *core_dc = DC_TO_CORE(dc);
1723         return core_dc->current_context->stream_count;
1724 }
1725
1726 struct dc_stream_state *dc_get_stream_at_index(const struct dc *dc, uint8_t i)
1727 {
1728         struct core_dc *core_dc = DC_TO_CORE(dc);
1729         if (i < core_dc->current_context->stream_count)
1730                 return core_dc->current_context->streams[i];
1731         return NULL;
1732 }
1733
1734 struct dc_link *dc_get_link_at_index(const struct dc *dc, uint32_t link_index)
1735 {
1736         struct core_dc *core_dc = DC_TO_CORE(dc);
1737         return core_dc->links[link_index];
1738 }
1739
1740 const struct graphics_object_id dc_get_link_id_at_index(
1741         struct dc *dc, uint32_t link_index)
1742 {
1743         struct core_dc *core_dc = DC_TO_CORE(dc);
1744         return core_dc->links[link_index]->link_id;
1745 }
1746
1747 enum dc_irq_source dc_get_hpd_irq_source_at_index(
1748         struct dc *dc, uint32_t link_index)
1749 {
1750         struct core_dc *core_dc = DC_TO_CORE(dc);
1751         return core_dc->links[link_index]->irq_source_hpd;
1752 }
1753
1754 const struct audio **dc_get_audios(struct dc *dc)
1755 {
1756         struct core_dc *core_dc = DC_TO_CORE(dc);
1757         return (const struct audio **)core_dc->res_pool->audios;
1758 }
1759
1760 enum dc_irq_source dc_interrupt_to_irq_source(
1761                 struct dc *dc,
1762                 uint32_t src_id,
1763                 uint32_t ext_id)
1764 {
1765         struct core_dc *core_dc = DC_TO_CORE(dc);
1766         return dal_irq_service_to_irq_source(core_dc->res_pool->irqs, src_id, ext_id);
1767 }
1768
1769 void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable)
1770 {
1771         struct core_dc *core_dc;
1772
1773         if (dc == NULL)
1774                 return;
1775         core_dc = DC_TO_CORE(dc);
1776
1777         dal_irq_service_set(core_dc->res_pool->irqs, src, enable);
1778 }
1779
1780 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
1781 {
1782         struct core_dc *core_dc = DC_TO_CORE(dc);
1783         dal_irq_service_ack(core_dc->res_pool->irqs, src);
1784 }
1785
1786 void dc_set_power_state(
1787         struct dc *dc,
1788         enum dc_acpi_cm_power_state power_state)
1789 {
1790         struct core_dc *core_dc = DC_TO_CORE(dc);
1791         int ref_count;
1792
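        /* D0 (re)initializes the HW; any other ACPI power state powers the HW
         * down and resets the cached context.
         */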
1793         switch (power_state) {
1794         case DC_ACPI_CM_POWER_STATE_D0:
1795                 core_dc->hwss.init_hw(core_dc);
1796                 break;
1797         default:
1799                 core_dc->hwss.power_down(core_dc);
1800
1801                 /* Zero out the current context so that on resume we start
1802                  * with a clean state, and DC HW programming optimizations
1803                  * will not cause any trouble.
1804                  */
1805
1806                 /* Preserve refcount */
1807                 ref_count = core_dc->current_context->ref_count;
1808                 dc_resource_validate_ctx_destruct(core_dc->current_context);
1809                 memset(core_dc->current_context, 0,
1810                                 sizeof(*core_dc->current_context));
1811                 core_dc->current_context->ref_count = ref_count;
1812
1813                 break;
1814         }
1816 }
1817
1818 void dc_resume(const struct dc *dc)
1819 {
1820         struct core_dc *core_dc = DC_TO_CORE(dc);
1821
1822         uint32_t i;
1823
1824         for (i = 0; i < core_dc->link_count; i++)
1825                 core_link_resume(core_dc->links[i]);
1826 }
1827
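/* The *_aux_dpcd helpers below pass 'false' and I2C_MOT_UNDEF to the DDC
 * service, while the *_aux_i2c helpers further down pass 'true' and a
 * caller-supplied MOT mode; the address/data/size plumbing is otherwise
 * identical.
 */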
1828 bool dc_read_aux_dpcd(
1829                 struct dc *dc,
1830                 uint32_t link_index,
1831                 uint32_t address,
1832                 uint8_t *data,
1833                 uint32_t size)
1834 {
1835         struct core_dc *core_dc = DC_TO_CORE(dc);
1836
1837         struct dc_link *link = core_dc->links[link_index];
1838         enum ddc_result r = dal_ddc_service_read_dpcd_data(
1839                         link->ddc,
1840                         false,
1841                         I2C_MOT_UNDEF,
1842                         address,
1843                         data,
1844                         size);
1845         return r == DDC_RESULT_SUCESSFULL;
1846 }
1847
1848 bool dc_write_aux_dpcd(
1849                 struct dc *dc,
1850                 uint32_t link_index,
1851                 uint32_t address,
1852                 const uint8_t *data,
1853                 uint32_t size)
1854 {
1855         struct core_dc *core_dc = DC_TO_CORE(dc);
1856         struct dc_link *link = core_dc->links[link_index];
1857
1858         enum ddc_result r = dal_ddc_service_write_dpcd_data(
1859                         link->ddc,
1860                         false,
1861                         I2C_MOT_UNDEF,
1862                         address,
1863                         data,
1864                         size);
1865         return r == DDC_RESULT_SUCESSFULL;
1866 }
1867
1868 bool dc_read_aux_i2c(
1869                 struct dc *dc,
1870                 uint32_t link_index,
1871                 enum i2c_mot_mode mot,
1872                 uint32_t address,
1873                 uint8_t *data,
1874                 uint32_t size)
1875 {
1876         struct core_dc *core_dc = DC_TO_CORE(dc);
1877
1878         struct dc_link *link = core_dc->links[link_index];
1879         enum ddc_result r = dal_ddc_service_read_dpcd_data(
1880                         link->ddc,
1881                         true,
1882                         mot,
1883                         address,
1884                         data,
1885                         size);
1886         return r == DDC_RESULT_SUCESSFULL;
1887 }
1888
1889 bool dc_write_aux_i2c(
1890                 struct dc *dc,
1891                 uint32_t link_index,
1892                 enum i2c_mot_mode mot,
1893                 uint32_t address,
1894                 const uint8_t *data,
1895                 uint32_t size)
1896 {
1897         struct core_dc *core_dc = DC_TO_CORE(dc);
1898         struct dc_link *link = core_dc->links[link_index];
1899
1900         enum ddc_result r = dal_ddc_service_write_dpcd_data(
1901                         link->ddc,
1902                         true,
1903                         mot,
1904                         address,
1905                         data,
1906                         size);
1907         return r == DDC_RESULT_SUCESSFULL;
1908 }
1909
1910 bool dc_query_ddc_data(
1911                 struct dc *dc,
1912                 uint32_t link_index,
1913                 uint32_t address,
1914                 uint8_t *write_buf,
1915                 uint32_t write_size,
1916                 uint8_t *read_buf,
1917                 uint32_t read_size)
1918 {
1919         struct core_dc *core_dc = DC_TO_CORE(dc);
1920
1921         struct dc_link *link = core_dc->links[link_index];
1922
1923         bool result = dal_ddc_service_query_ddc_data(
1924                         link->ddc,
1925                         address,
1926                         write_buf,
1927                         write_size,
1928                         read_buf,
1929                         read_size);
1930
1931         return result;
1932 }
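
/* Illustrative usage sketch (not part of the driver): reading the base EDID
 * block over DDC, assuming 'address' takes the standard 7-bit EDID slave
 * address 0x50 and that a one-byte start-offset write precedes the read:
 *
 *      uint8_t offset = 0;
 *      uint8_t edid[128];
 *      bool ok;
 *
 *      ok = dc_query_ddc_data(dc, link_index, 0x50, &offset, sizeof(offset),
 *                             edid, sizeof(edid));
 *
 * On success, 'edid' holds the 128-byte base EDID block.
 */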
1933
1934 bool dc_submit_i2c(
1935                 struct dc *dc,
1936                 uint32_t link_index,
1937                 struct i2c_command *cmd)
1938 {
1939         struct core_dc *core_dc = DC_TO_CORE(dc);
1940
1941         struct dc_link *link = core_dc->links[link_index];
1942         struct ddc_service *ddc = link->ddc;
1943
1944         return dal_i2caux_submit_i2c_command(
1945                 ddc->ctx->i2caux,
1946                 ddc->ddc_pin,
1947                 cmd);
1948 }
1949
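/* Remote sinks (e.g. for DP MST) are reference counted: the helper below
 * retains the sink and stores it in the link's remote_sinks[] array, which is
 * bounded by MAX_SINKS_PER_LINK.
 */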
1950 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
1951 {
1952         if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
1953                 BREAK_TO_DEBUGGER();
1954                 return false;
1955         }
1956
1957         dc_sink_retain(sink);
1958
1959         dc_link->remote_sinks[dc_link->sink_count] = sink;
1960         dc_link->sink_count++;
1961
1962         return true;
1963 }
1964
1965 struct dc_sink *dc_link_add_remote_sink(
1966                 struct dc_link *link,
1967                 const uint8_t *edid,
1968                 int len,
1969                 struct dc_sink_init_data *init_data)
1970 {
1971         struct dc_sink *dc_sink;
1972         enum dc_edid_status edid_status;
1973
1974         if (len > MAX_EDID_BUFFER_SIZE) {
1975                 dm_error("Max EDID buffer size breached!\n");
1976                 return NULL;
1977         }
1978
1979         if (!init_data) {
1980                 BREAK_TO_DEBUGGER();
1981                 return NULL;
1982         }
1983
1984         if (!init_data->link) {
1985                 BREAK_TO_DEBUGGER();
1986                 return NULL;
1987         }
1988
1989         dc_sink = dc_sink_create(init_data);
1990
1991         if (!dc_sink)
1992                 return NULL;
1993
1994         memmove(dc_sink->dc_edid.raw_edid, edid, len);
1995         dc_sink->dc_edid.length = len;
1996
1997         if (!link_add_remote_sink_helper(
1998                         link,
1999                         dc_sink))
2000                 goto fail_add_sink;
2001
2002         edid_status = dm_helpers_parse_edid_caps(
2003                         link->ctx,
2004                         &dc_sink->dc_edid,
2005                         &dc_sink->edid_caps);
2006
2007         if (edid_status != EDID_OK)
2008                 goto fail;
2009
2010         return dc_sink;
2011 fail:
2012         dc_link_remove_remote_sink(link, dc_sink);
2013 fail_add_sink:
2014         dc_sink_release(dc_sink);
2015         return NULL;
2016 }
2017
2018 void dc_link_set_sink(struct dc_link *link, struct dc_sink *sink)
2019 {
2020         link->local_sink = sink;
2021
2022         if (sink == NULL) {
2023                 link->type = dc_connection_none;
2024         } else {
2025                 link->type = dc_connection_single;
2026         }
2027 }
2028
2029 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
2030 {
2031         int i;
2032
2033         if (!link->sink_count) {
2034                 BREAK_TO_DEBUGGER();
2035                 return;
2036         }
2037
2038         for (i = 0; i < link->sink_count; i++) {
2039                 if (link->remote_sinks[i] == sink) {
2040                         dc_sink_release(sink);
2041                         link->remote_sinks[i] = NULL;
2042
2043                         /* shift the remaining entries down to fill the vacated slot */
2044                         while (i < link->sink_count - 1) {
2045                                 link->remote_sinks[i] = link->remote_sinks[i+1];
2046                                 i++;
2047                         }
2048                         link->remote_sinks[i] = NULL;
2049                         link->sink_count--;
2050                         return;
2051                 }
2052         }
2053 }
2054
2055 bool dc_init_dchub(struct dc *dc, struct dchub_init_data *dh_data)
2056 {
2057         int i;
2058         struct core_dc *core_dc = DC_TO_CORE(dc);
2059         struct mem_input *mi = NULL;
2060
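        /* The mem_input lookup below only verifies that the resource pool has
         * at least one MI; the actual DCHUB programming goes through the HW
         * sequencer's update_dchub hook.
         */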
2061         for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
2062                 if (core_dc->res_pool->mis[i] != NULL) {
2063                         mi = core_dc->res_pool->mis[i];
2064                         break;
2065                 }
2066         }
2067         if (mi == NULL) {
2068                 dm_error("no mem_input!\n");
2069                 return false;
2070         }
2071
2072         if (core_dc->hwss.update_dchub)
2073                 core_dc->hwss.update_dchub(core_dc->hwseq, dh_data);
2074         else
2075                 ASSERT(core_dc->hwss.update_dchub);
2077
2078         return true;
2080 }
2081
2082 void dc_log_hw_state(struct dc *dc)
2083 {
2084         struct core_dc *core_dc = DC_TO_CORE(dc);
2085
2086         if (core_dc->hwss.log_hw_state)
2087                 core_dc->hwss.log_hw_state(core_dc);
2088 }
2089