Commit | Line | Data |
---|---|---|
235c6763 AP |
1 | /* |
2 | * Copyright 2016 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | * Authors: AMD | |
23 | * | |
24 | */ | |
25 | ||
26 | ||
27 | #include "dm_services.h" | |
28 | #include "dm_helpers.h" | |
29 | #include "core_types.h" | |
30 | #include "resource.h" | |
31 | #include "dccg.h" | |
32 | #include "dce/dce_hwseq.h" | |
33 | #include "dcn30/dcn30_cm_common.h" | |
34 | #include "reg_helper.h" | |
35 | #include "abm.h" | |
36 | #include "hubp.h" | |
37 | #include "dchubbub.h" | |
38 | #include "timing_generator.h" | |
39 | #include "opp.h" | |
40 | #include "ipp.h" | |
41 | #include "mpc.h" | |
42 | #include "mcif_wb.h" | |
43 | #include "dc_dmub_srv.h" | |
44 | #include "link_hwss.h" | |
45 | #include "dpcd_defs.h" | |
46 | #include "dcn32_hwseq.h" | |
47 | #include "clk_mgr.h" | |
48 | #include "dsc.h" | |
49 | #include "dcn20/dcn20_optc.h" | |
85f4bc0c | 50 | #include "dce/dmub_hw_lock_mgr.h" |
e53524cd | 51 | #include "dcn32/dcn32_resource.h" |
d5a43956 | 52 | #include "link.h" |
f583db81 | 53 | #include "../dcn20/dcn20_hwseq.h" |
09a4ec5d | 54 | #include "dc_state_priv.h" |
235c6763 AP |
55 | |
56 | #define DC_LOGGER_INIT(logger) | |
57 | ||
58 | #define CTX \ | |
59 | hws->ctx | |
60 | #define REG(reg)\ | |
61 | hws->regs->reg | |
62 | #define DC_LOGGER \ | |
176278d8 | 63 | dc->ctx->logger |
235c6763 AP |
64 | |
65 | #undef FN | |
66 | #define FN(reg_name, field_name) \ | |
67 | hws->shifts->field_name, hws->masks->field_name | |
68 | ||
/*
 * Power-gate or un-gate one DSC (Display Stream Compressor) instance.
 *
 * @hws:      hardware sequencer context (register access + debug options)
 * @dsc_inst: DSC instance index, 0..3 (maps to power domains 16..19)
 * @power_on: true to power the domain up, false to gate it
 *
 * Skips all programming when DSC power gating is disabled via debug
 * options, or when double-buffered DSC PG support is not enabled.
 * IP_REQUEST_EN is raised around the domain update (and restored to its
 * original state afterwards) so the PG_CONFIG write takes effect.
 */
void dcn32_dsc_pg_control(
		struct dce_hwseq *hws,
		unsigned int dsc_inst,
		bool power_on)
{
	/* DOMAIN_POWER_GATE: 1 = gate; DOMAIN_PGFSM_PWR_STATUS: 0 = on, 2 = gated */
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;
	uint32_t org_ip_request_cntl = 0;
	struct dc *dc = hws->ctx->dc;

	if (dc->debug.disable_dsc_power_gate)
		return;

	if (!dc->debug.enable_double_buffered_dsc_pg_support)
		return;

	/* Enable register-update requests only if they were not already enabled;
	 * remembered so the original state can be restored at the end.
	 */
	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	DC_LOG_DSC("%s DSC power gate for inst %d", power_gate ? "enable" : "disable", dsc_inst);
	switch (dsc_inst) {
	case 0: /* DSC0 */
		REG_UPDATE(DOMAIN16_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		/* Poll until the PG state machine reports the requested state
		 * (1 us interval, up to 1000 retries).
		 */
		REG_WAIT(DOMAIN16_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DSC1 */
		REG_UPDATE(DOMAIN17_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN17_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DSC2 */
		REG_UPDATE(DOMAIN18_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN18_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DSC3 */
		REG_UPDATE(DOMAIN19_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN19_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	/* Restore IP_REQUEST_EN if this function enabled it */
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
131 | ||
132 | ||
/*
 * Enable or disable power gating for all plane-related power domains
 * (DCHUBP 0..3 and DSC 0..3).
 *
 * @hws:    hardware sequencer context
 * @enable: true to allow power gating, false to force all domains on
 *
 * DOMAIN_POWER_FORCEON is the inverse of @enable: forcing a domain on
 * disables its power gating.  IP_REQUEST_EN is raised around the domain
 * writes (and restored afterwards) so the updates take effect.
 */
void dcn32_enable_power_gating_plane(
	struct dce_hwseq *hws,
	bool enable)
{
	bool force_on = true; /* disable power gating */
	uint32_t org_ip_request_cntl = 0;

	if (enable)
		force_on = false;

	/* Enable register-update requests if not already enabled; restored below */
	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	/* DCHUBP0/1/2/3 */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);

	/* DCS0/1/2/3 (DSC power domains 16..19) */
	REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);

	/* Restore IP_REQUEST_EN if this function enabled it */
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
162 | ||
/*
 * Power-gate or un-gate one DCHUBP instance.
 *
 * @hws:       hardware sequencer context
 * @hubp_inst: HUBP instance index, 0..3 (power domains 0..3)
 * @power_on:  true to power the domain up, false to gate it
 *
 * No-op when HUBP power gating is disabled via debug options, or when
 * DOMAIN0_PG_CONFIG has no register address (domain not present on this
 * register map).
 */
void dcn32_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
{
	/* DOMAIN_POWER_GATE: 1 = gate; DOMAIN_PGFSM_PWR_STATUS: 0 = on, 2 = gated */
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;

	if (hws->ctx->dc->debug.disable_hubp_power_gate)
		return;

	if (REG(DOMAIN0_PG_CONFIG) == 0)
		return;

	switch (hubp_inst) {
	case 0:
		REG_SET(DOMAIN0_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate);
		/* poll PG state machine: 1 us interval, up to 1000 retries */
		REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
		break;
	case 1:
		REG_SET(DOMAIN1_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate);
		REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
		break;
	case 2:
		REG_SET(DOMAIN2_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate);
		REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
		break;
	case 3:
		REG_SET(DOMAIN3_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate);
		REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
196 | ||
197 | static bool dcn32_check_no_memory_request_for_cab(struct dc *dc) | |
198 | { | |
199 | int i; | |
200 | ||
201 | /* First, check no-memory-request case */ | |
202 | for (i = 0; i < dc->current_state->stream_count; i++) { | |
46604a08 ST |
203 | if ((dc->current_state->stream_status[i].plane_count) && |
204 | (dc->current_state->streams[i]->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)) | |
235c6763 AP |
205 | /* Fail eligibility on a visible stream */ |
206 | break; | |
207 | } | |
208 | ||
209 | if (i == dc->current_state->stream_count) | |
210 | return true; | |
211 | ||
212 | return false; | |
213 | } | |
214 | ||
235c6763 AP |
215 | |
216 | /* This function loops through every surface that needs to be cached in CAB for SS, | |
217 | * and calculates the total number of ways required to store all surfaces (primary, | |
218 | * meta, cursor). | |
219 | */ | |
220 | static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx) | |
221 | { | |
238debca | 222 | int i; |
79f3f1b6 | 223 | uint32_t num_ways = 0; |
238debca DV |
224 | uint32_t mall_ss_size_bytes = 0; |
225 | ||
226 | mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes; | |
227 | // TODO add additional logic for PSR active stream exclusion optimization | |
228 | // mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes; | |
235c6763 | 229 | |
238debca | 230 | // Include cursor size for CAB allocation |
525a65c7 | 231 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
238debca | 232 | struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[i]; |
235c6763 | 233 | |
238debca DV |
234 | if (!pipe->stream || !pipe->plane_state) |
235 | continue; | |
235c6763 | 236 | |
238debca | 237 | mall_ss_size_bytes += dcn32_helper_calculate_mall_bytes_for_cursor(dc, pipe, false); |
235c6763 AP |
238 | } |
239 | ||
240 | // Convert number of cache lines required to number of ways | |
525a65c7 AL |
241 | if (dc->debug.force_mall_ss_num_ways > 0) { |
242 | num_ways = dc->debug.force_mall_ss_num_ways; | |
8cffa89b DV |
243 | } else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes) { |
244 | num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, mall_ss_size_bytes); | |
238debca | 245 | } else { |
8cffa89b | 246 | num_ways = 0; |
525a65c7 | 247 | } |
238debca | 248 | |
235c6763 AP |
249 | return num_ways; |
250 | } | |
251 | ||
/*
 * Enable or disable MALL/CAB idle power optimizations via DMUB.
 *
 * @dc:     display core context
 * @enable: true to attempt enabling CAB, false to disable it
 *
 * Returns true when a DMUB CAB command was issued (optimization applied
 * or disabled), false when the optimization cannot be applied.
 *
 * Enable path, in order:
 *   1. No-memory-request case -> send CAB_NO_DCN_REQ.
 *   2. All surfaces fit in CAB (and no Stereo3D/TMZ surface) ->
 *      send CAB_DCN_SS_FIT_IN_CAB with the computed way count.
 *   Otherwise return false without sending anything.
 */
bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
	union dmub_rb_cmd cmd;
	uint8_t i;
	uint32_t ways;
	int j;
	bool mall_ss_unsupported = false;
	struct dc_plane_state *plane = NULL;

	if (!dc->ctx->dmub_srv)
		return false;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		/* MALL SS messaging is not supported with PSR at this time */
		if (dc->current_state->streams[i] != NULL &&
				dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
				(dc->current_state->stream_count > 1 || (!dc->current_state->streams[i]->dpms_off &&
						dc->current_state->stream_status[i].plane_count > 0)))
			return false;
	}

	if (enable) {
		/* 1. Check no memory request case for CAB.
		 * If no memory request case, send CAB_ACTION NO_DF_REQ DMUB message
		 */
		if (dcn32_check_no_memory_request_for_cab(dc)) {
			/* Enable no-memory-requests case */
			memset(&cmd, 0, sizeof(cmd));
			cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
			cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
			cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);

			dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);

			return true;
		}

		/* 2. Check if all surfaces can fit in CAB.
		 * If surfaces can fit into CAB, send CAB_ACTION_ALLOW DMUB message
		 * and configure HUBP's to fetch from MALL
		 */
		ways = dcn32_calculate_cab_allocation(dc, dc->current_state);

		/* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo,
		 * or TMZ surface, don't try to enter MALL.
		 */
		for (i = 0; i < dc->current_state->stream_count; i++) {
			for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
				plane = dc->current_state->stream_status[i].plane_states[j];

				if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO ||
						plane->address.tmz_surface) {
					mall_ss_unsupported = true;
					break;
				}
			}
			if (mall_ss_unsupported)
				break;
		}
		if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) {
			memset(&cmd, 0, sizeof(cmd));
			cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
			cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
			cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
			cmd.cab.cab_alloc_ways = (uint8_t)ways;

			dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);

			return true;
		}

		return false;
	}

	/* Disable CAB */
	memset(&cmd, 0, sizeof(cmd));
	cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
	cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION;
	cmd.cab.header.payload_bytes =
			sizeof(cmd.cab) - sizeof(cmd.cab.header);

	/* Disable is the only path that waits for DMUB completion */
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}
337 | ||
338 | /* Send DMCUB message with SubVP pipe info | |
339 | * - For each pipe in context, populate payload with required SubVP information | |
340 | * if the pipe is using SubVP for MCLK switch | |
341 | * - This function must be called while the DMUB HW lock is acquired by driver | |
342 | */ | |
343 | void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context) | |
344 | { | |
235c6763 AP |
345 | int i; |
346 | bool enable_subvp = false; | |
347 | ||
348 | if (!dc->ctx || !dc->ctx->dmub_srv) | |
349 | return; | |
350 | ||
351 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | |
352 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | |
353 | ||
012a04b1 | 354 | if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) { |
235c6763 AP |
355 | // There is at least 1 SubVP pipe, so enable SubVP |
356 | enable_subvp = true; | |
357 | break; | |
358 | } | |
359 | } | |
360 | dc_dmub_setup_subvp_dmub_command(dc, context, enable_subvp); | |
235c6763 AP |
361 | } |
362 | ||
85f4bc0c AL |
363 | /* Sub-Viewport DMUB lock needs to be acquired by driver whenever SubVP is active and: |
364 | * 1. Any full update for any SubVP main pipe | |
365 | * 2. Any immediate flip for any SubVP pipe | |
366 | * 3. Any flip for DRR pipe | |
367 | * 4. If SubVP was previously in use (i.e. in old context) | |
368 | */ | |
369 | void dcn32_subvp_pipe_control_lock(struct dc *dc, | |
370 | struct dc_state *context, | |
371 | bool lock, | |
372 | bool should_lock_all_pipes, | |
373 | struct pipe_ctx *top_pipe_to_program, | |
374 | bool subvp_prev_use) | |
375 | { | |
376 | unsigned int i = 0; | |
377 | bool subvp_immediate_flip = false; | |
378 | bool subvp_in_use = false; | |
b83c9e3d | 379 | struct pipe_ctx *pipe; |
09a4ec5d | 380 | enum mall_stream_type pipe_mall_type = SUBVP_NONE; |
85f4bc0c AL |
381 | |
382 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | |
383 | pipe = &context->res_ctx.pipe_ctx[i]; | |
09a4ec5d | 384 | pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); |
85f4bc0c | 385 | |
09a4ec5d | 386 | if (pipe->stream && pipe->plane_state && pipe_mall_type == SUBVP_MAIN) { |
85f4bc0c AL |
387 | subvp_in_use = true; |
388 | break; | |
389 | } | |
390 | } | |
391 | ||
392 | if (top_pipe_to_program && top_pipe_to_program->stream && top_pipe_to_program->plane_state) { | |
09a4ec5d | 393 | if (dc_state_get_pipe_subvp_type(context, top_pipe_to_program) == SUBVP_MAIN && |
85f4bc0c AL |
394 | top_pipe_to_program->plane_state->flip_immediate) |
395 | subvp_immediate_flip = true; | |
85f4bc0c AL |
396 | } |
397 | ||
319568d7 AL |
398 | // Don't need to lock for DRR VSYNC flips -- FW will wait for DRR pending update cleared. |
399 | if ((subvp_in_use && (should_lock_all_pipes || subvp_immediate_flip)) || (!subvp_in_use && subvp_prev_use)) { | |
85f4bc0c AL |
400 | union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 }; |
401 | ||
402 | if (!lock) { | |
403 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | |
404 | pipe = &context->res_ctx.pipe_ctx[i]; | |
09a4ec5d | 405 | if (pipe->stream && pipe->plane_state && pipe_mall_type == SUBVP_MAIN && |
85f4bc0c AL |
406 | should_lock_all_pipes) |
407 | pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); | |
408 | } | |
409 | } | |
410 | ||
411 | hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK; | |
412 | hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER; | |
413 | hw_lock_cmd.bits.lock = lock; | |
414 | hw_lock_cmd.bits.should_release = !lock; | |
415 | dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd); | |
416 | } | |
417 | } | |
418 | ||
0baae624 AL |
419 | void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params *params) |
420 | { | |
421 | struct dc *dc = params->subvp_pipe_control_lock_fast_params.dc; | |
422 | bool lock = params->subvp_pipe_control_lock_fast_params.lock; | |
012a04b1 | 423 | bool subvp_immediate_flip = params->subvp_pipe_control_lock_fast_params.subvp_immediate_flip; |
0baae624 AL |
424 | |
425 | // Don't need to lock for DRR VSYNC flips -- FW will wait for DRR pending update cleared. | |
426 | if (subvp_immediate_flip) { | |
427 | union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 }; | |
428 | ||
429 | hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK; | |
430 | hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER; | |
431 | hw_lock_cmd.bits.lock = lock; | |
432 | hw_lock_cmd.bits.should_release = !lock; | |
433 | dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd); | |
434 | } | |
435 | } | |
85f4bc0c | 436 | |
28e5c9d6 | 437 | bool dcn32_set_mpc_shaper_3dlut( |
235c6763 AP |
438 | struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) |
439 | { | |
440 | struct dpp *dpp_base = pipe_ctx->plane_res.dpp; | |
441 | int mpcc_id = pipe_ctx->plane_res.hubp->inst; | |
442 | struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; | |
443 | bool result = false; | |
444 | ||
445 | const struct pwl_params *shaper_lut = NULL; | |
446 | //get the shaper lut params | |
447 | if (stream->func_shaper) { | |
448 | if (stream->func_shaper->type == TF_TYPE_HWPWL) | |
449 | shaper_lut = &stream->func_shaper->pwl; | |
450 | else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { | |
27fc10d1 | 451 | cm_helper_translate_curve_to_hw_format(stream->ctx, |
235c6763 AP |
452 | stream->func_shaper, |
453 | &dpp_base->shaper_params, true); | |
454 | shaper_lut = &dpp_base->shaper_params; | |
455 | } | |
456 | } | |
457 | ||
458 | if (stream->lut3d_func && | |
459 | stream->lut3d_func->state.bits.initialized == 1) { | |
460 | ||
461 | result = mpc->funcs->program_3dlut(mpc, | |
462 | &stream->lut3d_func->lut_3d, | |
463 | mpcc_id); | |
464 | ||
465 | result = mpc->funcs->program_shaper(mpc, | |
466 | shaper_lut, | |
467 | mpcc_id); | |
468 | } | |
469 | ||
470 | return result; | |
471 | } | |
90f33674 ML |
472 | |
/*
 * Program the per-plane MCM LUTs in the MPC: post-blend 1D LUT, shaper
 * LUT, and 3DLUT, using the plane's blend_tf / in_shaper_func /
 * lut3d_func state.
 *
 * @pipe_ctx:    pipe whose MPCC (by HUBP instance) is programmed
 * @plane_state: plane providing the LUT/transfer-function data
 *
 * Returns false if translating the blend transfer function to HW format
 * fails (nothing further is programmed); otherwise returns the result of
 * the final program_3dlut() call.
 */
bool dcn32_set_mcm_luts(
	struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
	bool result = true;
	const struct pwl_params *lut_params = NULL;

	// 1D LUT
	if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
		lut_params = &plane_state->blend_tf.pwl;
	else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
		result = cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf,
				&dpp_base->regamma_params, false);
		if (!result)
			return result;

		lut_params = &dpp_base->regamma_params;
	}
	/* lut_params == NULL presumably disables/bypasses the 1D LUT -- confirm
	 * against the mpc program_1dlut implementation.
	 */
	mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
	lut_params = NULL;

	// Shaper
	if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
		lut_params = &plane_state->in_shaper_func.pwl;
	else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
		// TODO: dpp_base replace
		/* This path is not expected to be hit yet (shared dpp_base buffer) */
		ASSERT(false);
		cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func,
				&dpp_base->shaper_params, true);
		lut_params = &dpp_base->shaper_params;
	}

	mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);

	// 3D
	if (plane_state->lut3d_func.state.bits.initialized == 1)
		result = mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id);
	else
		/* NULL clears/bypasses the 3DLUT for this MPCC */
		result = mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);

	return result;
}
517 | ||
/*
 * Program the input (degamma) transfer function for a plane, then program
 * the MCM LUTs via the hwseq set_mcm_luts hook.
 *
 * @dc:          display core context
 * @pipe_ctx:    pipe being programmed
 * @plane_state: plane providing in_transfer_func
 *
 * Returns false if mpc or plane_state is NULL; otherwise the result of
 * set_mcm_luts (or true if that hook is not invoked).
 */
bool dcn32_set_input_transfer_func(struct dc *dc,
				struct pipe_ctx *pipe_ctx,
				const struct dc_plane_state *plane_state)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct mpc *mpc = dc->res_pool->mpc;
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;

	enum dc_transfer_func_predefined tf;
	bool result = true;
	const struct pwl_params *params = NULL;

	if (mpc == NULL || plane_state == NULL)
		return false;

	/* Default pre-degamma to unity unless a predefined TF is requested */
	tf = TRANSFER_FUNCTION_UNITY;

	if (plane_state->in_transfer_func.type == TF_TYPE_PREDEFINED)
		tf = plane_state->in_transfer_func.tf;

	dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf);

	/* Select the PWL to load into the GAMCOR LUT: HW PWL directly, or a
	 * distributed-points curve translated to HW format. params stays NULL
	 * (presumably LUT bypass -- confirm in dpp_program_gamcor_lut) when
	 * neither applies or translation fails.
	 */
	if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL)
		params = &plane_state->in_transfer_func.pwl;
	else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS &&
			cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func,
					&dpp_base->degamma_params, false))
		params = &dpp_base->degamma_params;

	dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);

	if (pipe_ctx->stream_res.opp &&
			pipe_ctx->stream_res.opp->ctx &&
			hws->funcs.set_mcm_luts)
		result = hws->funcs.set_mcm_luts(pipe_ctx, plane_state);

	return result;
}
556 | ||
235c6763 AP |
/*
 * Program the output transfer function for a stream: prefer the MPC
 * shaper + 3DLUT path; fall back to the output gamma (OGAM) blender LUT
 * when the 3DLUT path was not programmed.
 *
 * @dc:       display core context
 * @pipe_ctx: pipe being programmed (OGAM/3DLUT apply to the OPP head)
 * @stream:   stream providing out_transfer_func
 *
 * Returns true if the shaper/3DLUT path was used, false otherwise
 * (callers treat false as "OGAM path taken", not as an error).
 */
bool dcn32_set_output_transfer_func(struct dc *dc,
				struct pipe_ctx *pipe_ctx,
				const struct dc_stream_state *stream)
{
	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
	const struct pwl_params *params = NULL;
	bool ret = false;

	/* program OGAM or 3DLUT only for the top pipe*/
	if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) {
		/*program shaper and 3dlut in MPC*/
		ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream);
		if (ret == false && mpc->funcs->set_output_gamma) {
			if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
				params = &stream->out_transfer_func.pwl;
			/* NOTE(review): this branch reads pipe_ctx->stream while the
			 * branch above reads the @stream parameter -- they appear to be
			 * the same object at the call sites, but confirm before relying
			 * on it.
			 */
			else if (pipe_ctx->stream->out_transfer_func.type ==
					TF_TYPE_DISTRIBUTED_POINTS &&
					cm3_helper_translate_curve_to_hw_format(
							&stream->out_transfer_func,
							&mpc->blender_params, false))
				params = &mpc->blender_params;
			/* there are no ROM LUTs in OUTGAM */
			if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
				BREAK_TO_DEBUGGER();
		}
	}

	/* params == NULL when the 3DLUT path succeeded or no PWL was produced */
	if (mpc->funcs->set_output_gamma)
		mpc->funcs->set_output_gamma(mpc, mpcc_id, params);

	return ret;
}
590 | ||
4ed79308 | 591 | /* Program P-State force value according to if pipe is using SubVP / FPO or not: |
235c6763 AP |
592 | * 1. Reset P-State force on all pipes first |
593 | * 2. For each main pipe, force P-State disallow (P-State allow moderated by DMUB) | |
594 | */ | |
4ed79308 | 595 | void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context) |
235c6763 AP |
596 | { |
597 | int i; | |
4ed79308 AL |
598 | |
599 | /* Unforce p-state for each pipe if it is not FPO or SubVP. | |
600 | * For FPO and SubVP, if it's already forced disallow, leave | |
601 | * it as disallow. | |
235c6763 AP |
602 | */ |
603 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | |
604 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; | |
605 | struct hubp *hubp = pipe->plane_res.hubp; | |
e69d4335 AL |
606 | struct dc_stream_status *stream_status = NULL; |
607 | ||
608 | if (pipe->stream) | |
609 | stream_status = dc_state_get_stream_status(context, pipe->stream); | |
235c6763 | 610 | |
09a4ec5d | 611 | if (!pipe->stream || !(dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN || |
e69d4335 | 612 | (stream_status && stream_status->fpo_in_use))) { |
4ed79308 AL |
613 | if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) |
614 | hubp->funcs->hubp_update_force_pstate_disallow(hubp, false); | |
615 | if (hubp && hubp->funcs->hubp_update_force_cursor_pstate_disallow) | |
616 | hubp->funcs->hubp_update_force_cursor_pstate_disallow(hubp, false); | |
617 | } | |
235c6763 AP |
618 | } |
619 | ||
235c6763 AP |
620 | /* Loop through each pipe -- for each subvp main pipe force p-state allow equal to false. |
621 | */ | |
622 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | |
623 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; | |
4621e10e | 624 | struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
4ed79308 | 625 | struct hubp *hubp = pipe->plane_res.hubp; |
e69d4335 AL |
626 | struct dc_stream_status *stream_status = NULL; |
627 | struct dc_stream_status *old_stream_status = NULL; | |
235c6763 | 628 | |
4621e10e AL |
629 | /* Today for MED update type we do not call update clocks. However, for FPO |
630 | * the assumption is that update clocks should be called to disable P-State | |
631 | * switch before any HW programming since FPO in FW and driver are not | |
632 | * synchronized. This causes an issue where on a MED update, an FPO P-State | |
633 | * switch could be taking place, then driver forces P-State disallow in the below | |
634 | * code and prevents FPO from completing the sequence. In this case we add a check | |
635 | * to avoid re-programming (and thus re-setting) the P-State force register by | |
636 | * only reprogramming if the pipe was not previously Subvp or FPO. The assumption | |
637 | * is that the P-State force register should be programmed correctly the first | |
638 | * time SubVP / FPO was enabled, so there's no need to update / reset it if the | |
639 | * pipe config has never exited SubVP / FPO. | |
640 | */ | |
e69d4335 AL |
641 | if (pipe->stream) |
642 | stream_status = dc_state_get_stream_status(context, pipe->stream); | |
643 | if (old_pipe->stream) | |
644 | old_stream_status = dc_state_get_stream_status(dc->current_state, old_pipe->stream); | |
645 | ||
09a4ec5d | 646 | if (pipe->stream && (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN || |
e69d4335 | 647 | (stream_status && stream_status->fpo_in_use)) && |
5e211d2c | 648 | (!old_pipe->stream || (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) != SUBVP_MAIN && |
e69d4335 | 649 | (old_stream_status && !old_stream_status->fpo_in_use)))) { |
235c6763 AP |
650 | if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) |
651 | hubp->funcs->hubp_update_force_pstate_disallow(hubp, true); | |
4ed79308 AL |
652 | if (hubp && hubp->funcs->hubp_update_force_cursor_pstate_disallow) |
653 | hubp->funcs->hubp_update_force_cursor_pstate_disallow(hubp, true); | |
235c6763 AP |
654 | } |
655 | } | |
656 | } | |
657 | ||
/* Update MALL_SEL register based on if pipe / plane
 * is a phantom pipe, main pipe, and if using MALL
 * for SS.
 */
void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
{
	int i;
	unsigned int num_ways = dcn32_calculate_cab_allocation(dc, context);
	bool cache_cursor = false;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = pipe->plane_res.hubp;

		if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
			/* Cursor size in pixels, scaled to bytes per the cursor color
			 * format below.
			 */
			int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;

			switch (hubp->curs_attr.color_format) {
			case CURSOR_MODE_MONO:
				/* 4 bpp packed -> half a byte per pixel */
				cursor_size /= 2;
				break;
			case CURSOR_MODE_COLOR_1BIT_AND:
			case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
			case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
				/* 32bpp -> 4 bytes per pixel */
				cursor_size *= 4;
				break;

			case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
			case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
			default:
				/* 64bpp FP -> 8 bytes per pixel */
				cursor_size *= 8;
				break;
			}

			/* Cursors larger than 16KB are cached in MALL alongside the
			 * surface (threshold presumably matches the HW cursor buffer
			 * size -- confirm against HUBP spec).
			 */
			if (cursor_size > 16384)
				cache_cursor = true;

			if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
				/* MALL_SEL = 1: phantom pipe fetch path */
				hubp->funcs->hubp_update_mall_sel(hubp, 1, false);
			} else {
				// MALL not supported with Stereo3D
				/* MALL_SEL = 2 (SS) only when the allocation fits in cache,
				 * PSR is not in use, and the surface is not stereo or TMZ;
				 * otherwise 0 (disabled).
				 */
				hubp->funcs->hubp_update_mall_sel(hubp,
					num_ways <= dc->caps.cache_num_ways &&
					pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
					pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO &&
					!pipe->plane_state->address.tmz_surface ? 2 : 0,
							cache_cursor);
			}
		}
	}
}
709 | ||
710 | /* Program the sub-viewport pipe configuration after the main / phantom pipes | |
711 | * have been programmed in hardware. | |
712 | * 1. Update force P-State for all the main pipes (disallow P-state) | |
713 | * 2. Update MALL_SEL register | |
714 | * 3. Program FORCE_ONE_ROW_FOR_FRAME for main subvp pipes | |
715 | */ | |
716 | void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context) | |
717 | { | |
718 | int i; | |
719 | struct dce_hwseq *hws = dc->hwseq; | |
85f4bc0c AL |
720 | |
721 | // Don't force p-state disallow -- can't block dummy p-state | |
235c6763 AP |
722 | |
723 | // Update MALL_SEL register for each pipe | |
724 | if (hws && hws->funcs.update_mall_sel) | |
725 | hws->funcs.update_mall_sel(dc, context); | |
726 | ||
727 | // Program FORCE_ONE_ROW_FOR_FRAME and CURSOR_REQ_MODE for main subvp pipes | |
728 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | |
729 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; | |
730 | struct hubp *hubp = pipe->plane_res.hubp; | |
731 | ||
732 | if (pipe->stream && hubp && hubp->funcs->hubp_prepare_subvp_buffering) { | |
e87b92c6 ST |
733 | /* TODO - remove setting CURSOR_REQ_MODE to 0 for legacy cases |
734 | * - need to investigate single pipe MPO + SubVP case to | |
735 | * see if CURSOR_REQ_MODE will be back to 1 for SubVP | |
736 | * when it should be 0 for MPO | |
737 | */ | |
09a4ec5d | 738 | if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) |
235c6763 | 739 | hubp->funcs->hubp_prepare_subvp_buffering(hubp, true); |
235c6763 AP |
740 | } |
741 | } | |
742 | } | |
743 | ||
58330ef1 AL |
744 | static void dcn32_initialize_min_clocks(struct dc *dc) |
745 | { | |
746 | struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk; | |
747 | ||
01ecd870 | 748 | clocks->dcfclk_deep_sleep_khz = DCN3_2_DCFCLK_DS_INIT_KHZ; |
58330ef1 AL |
749 | clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000; |
750 | clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000; | |
751 | clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000; | |
752 | clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000; | |
d11dfbec AL |
753 | clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000; |
754 | clocks->fclk_p_state_change_support = true; | |
755 | clocks->p_state_change_support = true; | |
d170e938 AL |
756 | if (dc->debug.disable_boot_optimizations) { |
757 | clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000; | |
758 | } else { | |
759 | /* Even though DPG_EN = 1 for the connected display, it still requires the | |
760 | * correct timing so we cannot set DISPCLK to min freq or it could cause | |
761 | * audio corruption. Read current DISPCLK from DENTIST and request the same | |
762 | * freq to ensure that the timing is valid and unchanged. | |
763 | */ | |
764 | clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr); | |
d170e938 | 765 | } |
58330ef1 AL |
766 | |
767 | dc->clk_mgr->funcs->update_clocks( | |
768 | dc->clk_mgr, | |
769 | dc->current_state, | |
770 | true); | |
771 | } | |
772 | ||
/* dcn32_init_hw - take DCN3.2 display hardware from boot/VBIOS state into
 * driver-owned state.
 *
 * One-time bring-up sequence: clock manager and DCCG init, VGA/OPTC memory
 * power configuration, reference-clock bookkeeping from the BIOS firmware
 * info table, per-link encoder init, pipe power-down (unless a seamless eDP
 * boot was requested), audio/backlight/ABM init, clock-gating enable,
 * watermark programming, and DMCUB capability query. The order of these
 * steps matters; see inline comments.
 */
void dcn32_init_hw(struct dc *dc)
{
	struct abm **abms = dc->res_pool->multiple_abms;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	int i;
	int edp_num;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;

	/* Bring the clock manager to a known state before touching other blocks. */
	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	// Initialize the dccg
	if (res_pool->dccg->funcs->dccg_init)
		res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	/* Cold boot (VBIOS never ran accelerated mode): apply golden settings
	 * and disable legacy VGA.
	 */
	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		hws->funcs.bios_golden_init(dc);
		hws->funcs.disable_vga(dc->hwseq);
	}

	// Set default OPTC memory power states
	if (dc->debug.enable_mem_low_power.bits.optc) {
		// Shutdown when unassigned and light sleep in VBLANK
		REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.vga) {
		// Power down VGA memory
		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
	}

	/* Derive DCCG/DCHUB reference clocks from the BIOS crystal frequency. */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->hubbub) {
			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* enable_power_gating_plane before dsc_pg_control because
	 * FORCEON = 1 with hw default value on bootup, resume from s3
	 */
	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		/* Disable boot optimizations means power down everything including PHY, DIG,
		 * and OTG (i.e. the boot is not optimized because we do a full power down).
		 */
		if (dc->hwss.enable_accelerated_mode && dc->debug.disable_boot_optimizations)
			dc->hwss.enable_accelerated_mode(dc, dc->current_state);
		else
			hws->funcs.init_pipes(dc, dc->current_state);

		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

		dcn32_initialize_min_clocks(dc);

		/* On HW init, allow idle optimizations after pipes have been turned off.
		 *
		 * In certain D3 cases (i.e. BOCO / BOMACO) it's possible that hardware state
		 * is reset (i.e. not in idle at the time hw init is called), but software state
		 * still has idle_optimizations = true, so we must disable idle optimizations first
		 * (i.e. set false), then re-enable (set true).
		 */
		dc_allow_idle_optimizations(dc, false);
		dc_allow_idle_optimizations(dc, true);
	}

	/* In headless boot cases, DIG may be turned
	 * on which causes HW/SW discrepancies.
	 * To avoid this, power down hardware on boot
	 * if DIG is turned on and seamless boot not enabled
	 */
	if (!dc->config.seamless_boot_edp_requested) {
		struct dc_link *edp_links[MAX_NUM_EDP];
		struct dc_link *edp_link;

		dc_get_edp_links(dc, edp_links, &edp_num);
		if (edp_num) {
			/* eDP present: turn backlight off before the full power-down
			 * so the panel does not flash garbage.
			 */
			for (i = 0; i < edp_num; i++) {
				edp_link = edp_links[i];
				if (edp_link->link_enc->funcs->is_dig_enabled &&
						edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
						dc->hwss.edp_backlight_control &&
						hws->funcs.power_down &&
						dc->hwss.edp_power_control) {
					dc->hwss.edp_backlight_control(edp_link, false);
					hws->funcs.power_down(dc);
					dc->hwss.edp_power_control(edp_link, false);
				}
			}
		} else {
			/* No eDP: a single power_down suffices if any DIG is active. */
			for (i = 0; i < dc->link_count; i++) {
				struct dc_link *link = dc->links[i];

				if (link->link_enc->funcs->is_dig_enabled &&
						link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
						hws->funcs.power_down) {
					hws->funcs.power_down(dc);
					break;
				}

			}
		}
	}

	for (i = 0; i < res_pool->audio_count; i++) {
		struct audio *audio = res_pool->audios[i];

		audio->funcs->hw_init(audio);
	}

	/* Read back panel-controller backlight state so ABM is seeded with the
	 * values VBIOS/firmware left behind.
	 */
	for (i = 0; i < dc->link_count; i++) {
		struct dc_link *link = dc->links[i];

		if (link->panel_cntl) {
			backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
			user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (abms[i] != NULL && abms[i]->funcs != NULL)
			abms[i]->funcs->abm_init(abms[i], backlight, user_level);
	}

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);

	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);

	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->set_hard_max_memclk &&
	    !dc->clk_mgr->dc_mode_softmax_enabled)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);

	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, false, false);

	if (dc->res_pool->hubbub->funcs->init_crb)
		dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);

	if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
		dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc);

	// Get DMCUB capabilities
	if (dc->ctx->dmub_srv) {
		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
		dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support;
		dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable;
		dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;

		/* for DCN401 testing only */
		dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
		if (dc->caps.dmub_caps.fams_ver == 2) {
			/* FAMS2 is enabled */
			/* "&= true" deliberately preserves whatever the debug
			 * option was already set to.
			 */
			dc->debug.fams2_config.bits.enable &= true;
		} else if (dc->ctx->dmub_srv->dmub->fw_version <
				DMUB_FW_VERSION(7, 0, 35)) {
			/* FAMS2 is disabled */
			dc->debug.fams2_config.bits.enable = false;
			if (dc->debug.using_dml2 && dc->res_pool->funcs->update_bw_bounding_box) {
				/* update bounding box if FAMS2 disabled */
				dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
			}
			dc->debug.force_disable_subvp = true;
			dc->debug.disable_fpo_optimizations = true;
		}
	}
}
1006 | ||
/* dcn32_update_dsc_on_stream - enable or disable DSC for a stream's ODM chain.
 * @pipe_ctx: head pipe of the stream; its next_odm_pipe chain is walked.
 * @enable:   true  -> program and enable DSC on every ODM pipe, then enable
 *                     DSC in OPTC;
 *            false -> disable DSC in OPTC and disconnect each DSC block
 *                     (the block itself is disabled later when the OPP head
 *                     pipe is reset).
 */
void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
	struct dc *dc = pipe_ctx->stream->ctx->dc;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 1;
	struct dccg *dccg = dc->res_pool->dccg;
	/* It has been found that when DSCCLK is lower than 16Mhz, we will get DCN
	 * register access hung. When DSCCLk is based on refclk, DSCCLk is always a
	 * fixed value higher than 16Mhz so the issue doesn't occur. When DSCCLK is
	 * generated by DTO, DSCCLK would be based on 1/3 dispclk. For small timings
	 * with DSC such as 480p60Hz, the dispclk could be low enough to trigger
	 * this problem. We are implementing a workaround here to keep using dscclk
	 * based on fixed value refclk when timing is smaller than 3x16Mhz (i.e
	 * 48Mhz) pixel clock to avoid hitting this problem.
	 */
	bool should_use_dto_dscclk = (dccg->funcs->set_dto_dscclk != NULL) &&
			stream->timing.pix_clk_100hz > 480000;

	ASSERT(dsc);
	/* Count OPPs in the ODM chain; the picture is split across them. */
	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
		opp_cnt++;

	if (enable) {
		struct dsc_config dsc_cfg;
		struct dsc_optc_config dsc_optc_cfg = {0};
		enum optc_dsc_mode optc_dsc_mode;

		/* Enable DSC hw block */
		/* Each ODM slice compresses 1/opp_cnt of the picture width, so the
		 * per-DSC width and horizontal slice count are divided by opp_cnt.
		 */
		dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
		dsc_cfg.color_depth = stream->timing.display_color_depth;
		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
		ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;

		if (should_use_dto_dscclk)
			dccg->funcs->set_dto_dscclk(dccg, dsc->inst);
		dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
		dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
		/* Program every secondary ODM pipe with the same config. */
		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
			struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;

			ASSERT(odm_dsc);
			if (should_use_dto_dscclk)
				dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst);
			odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
			odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
		}
		optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
		/* Enable DSC in OPTC */
		DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst);
		pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
							optc_dsc_mode,
							dsc_optc_cfg.bytes_per_pixel,
							dsc_optc_cfg.slice_width);
	} else {
		/* disable DSC in OPTC */
		pipe_ctx->stream_res.tg->funcs->set_dsc_config(
				pipe_ctx->stream_res.tg,
				OPTC_DSC_DISABLED, 0, 0);

		/* only disconnect DSC block, DSC is disabled when OPP head pipe is reset */
		dsc->funcs->dsc_disconnect(pipe_ctx->stream_res.dsc);
		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
			ASSERT(odm_pipe->stream_res.dsc);
			odm_pipe->stream_res.dsc->funcs->dsc_disconnect(odm_pipe->stream_res.dsc);
		}
	}
}
1080 | ||
49f59499 JL |
1081 | /* |
1082 | * Given any pipe_ctx, return the total ODM combine factor, and optionally return | |
1083 | * the OPPids which are used | |
1084 | * */ | |
1085 | static unsigned int get_odm_config(struct pipe_ctx *pipe_ctx, unsigned int *opp_instances) | |
1086 | { | |
1087 | unsigned int opp_count = 1; | |
1088 | struct pipe_ctx *odm_pipe; | |
1089 | ||
1090 | /* First get to the top pipe */ | |
1091 | for (odm_pipe = pipe_ctx; odm_pipe->prev_odm_pipe; odm_pipe = odm_pipe->prev_odm_pipe) | |
1092 | ; | |
1093 | ||
1094 | /* First pipe is always used */ | |
1095 | if (opp_instances) | |
1096 | opp_instances[0] = odm_pipe->stream_res.opp->inst; | |
1097 | ||
1098 | /* Find and count odm pipes, if any */ | |
1099 | for (odm_pipe = odm_pipe->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { | |
1100 | if (opp_instances) | |
1101 | opp_instances[opp_count] = odm_pipe->stream_res.opp->inst; | |
1102 | opp_count++; | |
1103 | } | |
1104 | ||
1105 | return opp_count; | |
1106 | } | |
1107 | ||
/* dcn32_update_odm - reprogram ODM combine for an OTG master pipe after its
 * ODM topology changed: set combine or bypass in the timing generator,
 * enable OPP clocks and left-edge extra-pixel handling on secondary ODM
 * pipes, refresh the DSC configuration, and reprogram the blank pattern
 * when the pipe has no DPP (blanking is generated by the OPP).
 */
void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 0;
	int opp_inst[MAX_PIPES] = {0};
	int odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
	int last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);

	opp_cnt = get_odm_config(pipe_ctx, opp_inst);

	if (opp_cnt > 1)
		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
				pipe_ctx->stream_res.tg,
				opp_inst, opp_cnt,
				odm_slice_width, last_odm_slice_width);
	else
		pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

	/* Bring up the OPPs of the secondary ODM pipes. */
	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
		odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
				odm_pipe->stream_res.opp,
				true);
		odm_pipe->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
				odm_pipe->stream_res.opp,
				pipe_ctx->stream->timing.pixel_encoding,
				resource_is_pipe_type(odm_pipe, OTG_MASTER));
	}

	if (pipe_ctx->stream_res.dsc) {
		struct pipe_ctx *current_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];

		dcn32_update_dsc_on_stream(pipe_ctx, pipe_ctx->stream->timing.flags.DSC);

		/* Check if no longer using pipe for ODM, then need to disconnect DSC for that pipe */
		if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe &&
				current_pipe_ctx->next_odm_pipe->stream_res.dsc) {
			struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc;

			/* disconnect DSC block from stream */
			dsc->funcs->dsc_disconnect(dsc);
		}
	}

	if (!resource_is_pipe_type(pipe_ctx, DPP_PIPE))
		/*
		 * blank pattern is generated by OPP, reprogram blank pattern
		 * due to OPP count change
		 */
		dc->hwseq->funcs.blank_pixel_data(dc, pipe_ctx, true);
}
1159 | ||
effee878 | 1160 | unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div) |
49f59499 JL |
1161 | { |
1162 | struct dc_stream_state *stream = pipe_ctx->stream; | |
effee878 | 1163 | unsigned int odm_combine_factor = 0; |
ffccfdba | 1164 | bool two_pix_per_container = false; |
49f59499 | 1165 | |
e6a901a0 | 1166 | two_pix_per_container = pipe_ctx->stream_res.tg->funcs->is_two_pixels_per_container(&stream->timing); |
effee878 | 1167 | odm_combine_factor = get_odm_config(pipe_ctx, NULL); |
49f59499 | 1168 | |
98ce7d32 | 1169 | if (stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { |
e3aa827e | 1170 | *k1_div = PIXEL_RATE_DIV_BY_1; |
49f59499 | 1171 | *k2_div = PIXEL_RATE_DIV_BY_1; |
3b214bb7 | 1172 | } else if (dc_is_hdmi_tmds_signal(stream->signal) || dc_is_dvi_signal(stream->signal)) { |
49f59499 JL |
1173 | *k1_div = PIXEL_RATE_DIV_BY_1; |
1174 | if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) | |
1175 | *k2_div = PIXEL_RATE_DIV_BY_2; | |
1176 | else | |
1177 | *k2_div = PIXEL_RATE_DIV_BY_4; | |
74fa4c81 | 1178 | } else if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) { |
2b2b3a75 | 1179 | if (two_pix_per_container) { |
49f59499 JL |
1180 | *k1_div = PIXEL_RATE_DIV_BY_1; |
1181 | *k2_div = PIXEL_RATE_DIV_BY_2; | |
49f59499 | 1182 | } else { |
2b2b3a75 RS |
1183 | *k1_div = PIXEL_RATE_DIV_BY_1; |
1184 | *k2_div = PIXEL_RATE_DIV_BY_4; | |
effee878 | 1185 | if ((odm_combine_factor == 2) || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx)) |
49f59499 JL |
1186 | *k2_div = PIXEL_RATE_DIV_BY_2; |
1187 | } | |
1188 | } | |
1189 | ||
1190 | if ((*k1_div == PIXEL_RATE_DIV_NA) && (*k2_div == PIXEL_RATE_DIV_NA)) | |
1191 | ASSERT(false); | |
effee878 DL |
1192 | |
1193 | return odm_combine_factor; | |
49f59499 | 1194 | } |
64a30aaf | 1195 | |
975507d7 | 1196 | void dcn32_calculate_pix_rate_divider( |
1197 | struct dc *dc, | |
1198 | struct dc_state *context, | |
1199 | const struct dc_stream_state *stream) | |
1200 | { | |
1201 | struct dce_hwseq *hws = dc->hwseq; | |
1202 | struct pipe_ctx *pipe_ctx = NULL; | |
1203 | unsigned int k1_div = PIXEL_RATE_DIV_NA; | |
1204 | unsigned int k2_div = PIXEL_RATE_DIV_NA; | |
1205 | ||
1206 | pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); | |
1207 | ||
1208 | if (pipe_ctx) { | |
1209 | ||
1210 | if (hws->funcs.calculate_dccg_k1_k2_values) | |
1211 | hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div); | |
1212 | ||
1213 | pipe_ctx->pixel_rate_divider.div_factor1 = k1_div; | |
1214 | pipe_ctx->pixel_rate_divider.div_factor2 = k2_div; | |
1215 | } | |
1216 | } | |
1217 | ||
/* dcn32_resync_fifo_dccg_dio - resynchronize the DCCG/DIO FIFO.
 *
 * Temporarily disables the CRTC on every OTG master whose stream is in DPMS
 * off (or is a virtual signal) and is not a SubVP phantom, triggers the DIO
 * FIFO resync in DCCG, then restores ODM combine state and re-enables the
 * disabled CRTCs, finishing with a sync trigger on the current state.
 *
 * Pipes with index <= current_pipe_idx are looked up in the new context;
 * the rest in dc->current_state — presumably because later pipes have not
 * been programmed from the new context yet (TODO confirm against callers).
 */
void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx)
{
	unsigned int i;
	struct pipe_ctx *pipe = NULL;
	bool otg_disabled[MAX_PIPES] = {false};
	struct dc_state *dc_state = NULL;

	/* Pass 1: disable the CRTC on eligible OTG masters. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (i <= current_pipe_idx) {
			pipe = &context->res_ctx.pipe_ctx[i];
			dc_state = context;
		} else {
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			dc_state = dc->current_state;
		}

		if (!resource_is_pipe_type(pipe, OTG_MASTER))
			continue;

		if ((pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))
			&& dc_state_get_pipe_subvp_type(dc_state, pipe) != SUBVP_PHANTOM) {
			pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
			reset_sync_context_for_pipe(dc, context, i);
			otg_disabled[i] = true;
		}
	}

	hws->ctx->dc->res_pool->dccg->funcs->trigger_dio_fifo_resync(hws->ctx->dc->res_pool->dccg);

	/* Pass 2: restore ODM combine and re-enable the CRTCs disabled above. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (i <= current_pipe_idx)
			pipe = &context->res_ctx.pipe_ctx[i];
		else
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (otg_disabled[i]) {
			int opp_inst[MAX_PIPES] = { pipe->stream_res.opp->inst };
			int opp_cnt = 1;
			int last_odm_slice_width = resource_get_odm_slice_dst_width(pipe, true);
			int odm_slice_width = resource_get_odm_slice_dst_width(pipe, false);
			struct pipe_ctx *odm_pipe;

			for (odm_pipe = pipe->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
				opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
				opp_cnt++;
			}
			if (opp_cnt > 1)
				pipe->stream_res.tg->funcs->set_odm_combine(
						pipe->stream_res.tg,
						opp_inst, opp_cnt,
						odm_slice_width,
						last_odm_slice_width);
			pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
		}
	}

	dc_trigger_sync(dc, dc->current_state);
}
1276 | ||
/* dcn32_unblank_stream - start video output on a previously blanked stream.
 *
 * Builds the encoder unblank parameters (ODM count, timing, link rate) and
 * unblanks via the HPO DP stream encoder for 128b/132b links or via the DIO
 * stream encoder for other DP signals; re-enables the eDP backlight when
 * the local sink is eDP.
 */
void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
		struct dc_link_settings *link_settings)
{
	struct encoder_unblank_param params = {0};
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	struct dce_hwseq *hws = link->dc->hwseq;
	struct pipe_ctx *odm_pipe;

	params.opp_cnt = 1;
	params.pix_per_cycle = pipe_ctx->stream_res.pix_clk_params.dio_se_pix_per_cycle;

	/* Count OPPs across the ODM chain. */
	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
		params.opp_cnt++;

	/* only 3 items below are used by unblank */
	params.timing = pipe_ctx->stream->timing;

	params.link_settings.link_rate = link_settings->link_rate;

	if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
		/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
				pipe_ctx->stream_res.hpo_dp_stream_enc,
				pipe_ctx->stream_res.tg->inst);
	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
		/* Two pixels per cycle: the encoder sees half the pixel clock. */
		if (pipe_ctx->stream_res.tg->funcs->is_two_pixels_per_container(&stream->timing) ||
			params.opp_cnt > 1) {
			params.timing.pix_clk_100hz /= 2;
			params.pix_per_cycle = 2;
		}
		pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
				pipe_ctx->stream_res.stream_enc, params.pix_per_cycle > 1);
		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
	}

	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
		hws->funcs.edp_backlight_control(link, true);
}
1316 | ||
1317 | bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx) | |
1318 | { | |
1319 | struct dc *dc = pipe_ctx->stream->ctx->dc; | |
1320 | ||
2d550a15 ML |
1321 | if (!is_h_timing_divisible_by_2(pipe_ctx->stream)) |
1322 | return false; | |
1323 | ||
98ce7d32 | 1324 | if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) && |
88ef4c5b ST |
1325 | dc->debug.enable_dp_dig_pixel_rate_div_policy) |
1326 | return true; | |
1327 | return false; | |
1328 | } | |
594b237b | 1329 | |
9c75891f | 1330 | static void apply_symclk_on_tx_off_wa(struct dc_link *link) |
594b237b | 1331 | { |
9c75891f WL |
1332 | /* There are use cases where SYMCLK is referenced by OTG. For instance |
1333 | * for TMDS signal, OTG relies SYMCLK even if TX video output is off. | |
1334 | * However current link interface will power off PHY when disabling link | |
1335 | * output. This will turn off SYMCLK generated by PHY. The workaround is | |
1336 | * to identify such case where SYMCLK is still in use by OTG when we | |
1337 | * power off PHY. When this is detected, we will temporarily power PHY | |
1338 | * back on and move PHY's SYMCLK state to SYMCLK_ON_TX_OFF by calling | |
1339 | * program_pix_clk interface. When OTG is disabled, we will then power | |
1340 | * off PHY by calling disable link output again. | |
1341 | * | |
1342 | * In future dcn generations, we plan to rework transmitter control | |
1343 | * interface so that we could have an option to set SYMCLK ON TX OFF | |
1344 | * state in one step without this workaround | |
1345 | */ | |
1346 | ||
1347 | struct dc *dc = link->ctx->dc; | |
1348 | struct pipe_ctx *pipe_ctx = NULL; | |
1349 | uint8_t i; | |
1350 | ||
1351 | if (link->phy_state.symclk_ref_cnts.otg > 0) { | |
1352 | for (i = 0; i < MAX_PIPES; i++) { | |
1353 | pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; | |
53f32880 | 1354 | if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) && pipe_ctx->stream->link == link) { |
9c75891f WL |
1355 | pipe_ctx->clock_source->funcs->program_pix_clk( |
1356 | pipe_ctx->clock_source, | |
1357 | &pipe_ctx->stream_res.pix_clk_params, | |
98ce7d32 WL |
1358 | dc->link_srv->dp_get_encoding_format( |
1359 | &pipe_ctx->link_config.dp_link_settings), | |
9c75891f WL |
1360 | &pipe_ctx->pll_settings); |
1361 | link->phy_state.symclk_state = SYMCLK_ON_TX_OFF; | |
1362 | break; | |
1363 | } | |
594b237b | 1364 | } |
9c75891f WL |
1365 | } |
1366 | } | |
594b237b | 1367 | |
/* dcn32_disable_link_output - power down the PHY output for a link.
 *
 * Sequence: turn off the eDP backlight (or lock the PHY via DMCU for
 * non-eDP), disable the link output through link_hwss and mark SYMCLK off,
 * then power off eDP (or unlock the PHY), trace the DPCD source sequence,
 * and finally apply the SYMCLK-on/TX-off workaround for OTGs that still
 * reference SYMCLK.
 */
void dcn32_disable_link_output(struct dc_link *link,
		const struct link_resource *link_res,
		enum signal_type signal)
{
	struct dc *dc = link->ctx->dc;
	const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (signal == SIGNAL_TYPE_EDP &&
			link->dc->hwss.edp_backlight_control &&
			!link->skip_implict_edp_power_control)
		link->dc->hwss.edp_backlight_control(link, false);
	else if (dmcu != NULL && dmcu->funcs->lock_phy)
		dmcu->funcs->lock_phy(dmcu);

	link_hwss->disable_link_output(link, link_res, signal);
	link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;

	/* NOTE(review): this guard checks edp_backlight_control but calls
	 * edp_power_control — matches upstream; presumably both hooks are
	 * always installed together. Verify before changing.
	 */
	if (signal == SIGNAL_TYPE_EDP &&
			link->dc->hwss.edp_backlight_control &&
			!link->skip_implict_edp_power_control)
		link->dc->hwss.edp_power_control(link, false);
	else if (dmcu != NULL && dmcu->funcs->lock_phy)
		dmcu->funcs->unlock_phy(dmcu);

	dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);

	apply_symclk_on_tx_off_wa(link);
}
43080c9b AL |
1397 | |
1398 | /* For SubVP the main pipe can have a viewport position change | |
1399 | * without a full update. In this case we must also update the | |
1400 | * viewport positions for the phantom pipe accordingly. | |
1401 | */ | |
1402 | void dcn32_update_phantom_vp_position(struct dc *dc, | |
1403 | struct dc_state *context, | |
1404 | struct pipe_ctx *phantom_pipe) | |
1405 | { | |
ca86bbdd | 1406 | uint32_t i; |
43080c9b AL |
1407 | struct dc_plane_state *phantom_plane = phantom_pipe->plane_state; |
1408 | ||
1409 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | |
1410 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; | |
1411 | ||
09a4ec5d | 1412 | if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN && |
012a04b1 | 1413 | dc_state_get_paired_subvp_stream(context, pipe->stream) == phantom_pipe->stream) { |
43080c9b AL |
1414 | if (pipe->plane_state && pipe->plane_state->update_flags.bits.position_change) { |
1415 | ||
1416 | phantom_plane->src_rect.x = pipe->plane_state->src_rect.x; | |
1417 | phantom_plane->src_rect.y = pipe->plane_state->src_rect.y; | |
1418 | phantom_plane->clip_rect.x = pipe->plane_state->clip_rect.x; | |
1419 | phantom_plane->dst_rect.x = pipe->plane_state->dst_rect.x; | |
1420 | phantom_plane->dst_rect.y = pipe->plane_state->dst_rect.y; | |
1421 | ||
1422 | phantom_pipe->plane_state->update_flags.bits.position_change = 1; | |
1423 | resource_build_scaling_params(phantom_pipe); | |
1424 | return; | |
1425 | } | |
1426 | } | |
1427 | } | |
1428 | } | |
1e939ea1 | 1429 | |
e267f5e6 AL |
1430 | /* Treat the phantom pipe as if it needs to be fully enabled. |
1431 | * If the pipe was previously in use but not phantom, it would | |
1432 | * have been disabled earlier in the sequence so we need to run | |
1433 | * the full enable sequence. | |
1434 | */ | |
1435 | void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe) | |
1436 | { | |
1437 | phantom_pipe->update_flags.raw = 0; | |
012a04b1 DV |
1438 | if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) { |
1439 | phantom_pipe->update_flags.bits.enable = 1; | |
1440 | phantom_pipe->update_flags.bits.mpcc = 1; | |
1441 | phantom_pipe->update_flags.bits.dppclk = 1; | |
1442 | phantom_pipe->update_flags.bits.hubp_interdependent = 1; | |
1443 | phantom_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; | |
1444 | phantom_pipe->update_flags.bits.gamut_remap = 1; | |
1445 | phantom_pipe->update_flags.bits.scaler = 1; | |
1446 | phantom_pipe->update_flags.bits.viewport = 1; | |
1447 | phantom_pipe->update_flags.bits.det_size = 1; | |
1448 | if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) { | |
1449 | phantom_pipe->update_flags.bits.odm = 1; | |
1450 | phantom_pipe->update_flags.bits.global_sync = 1; | |
e267f5e6 AL |
1451 | } |
1452 | } | |
1453 | } | |
1454 | ||
1e939ea1 DV |
/* Query the power-gating FSM status of one DSC instance.
 *
 * @hws:      hardware sequencer context providing register access
 * @dsc_inst: DSC instance index (0..3, mapping to PG domains 16..19)
 *
 * Return: true when the DSC domain is ungated/powered (PGFSM status == 0),
 * false when it is power gated.  An out-of-range instance trips the
 * debugger breakpoint and reports the zero-initialized (powered) status.
 */
bool dcn32_dsc_pg_status(
		struct dce_hwseq *hws,
		unsigned int dsc_inst)
{
	uint32_t pwr_status = 0;

	/* Each DSC instance lives in its own PG domain register */
	switch (dsc_inst) {
	case 0: /* DSC0 */
		REG_GET(DOMAIN16_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 1: /* DSC1 */

		REG_GET(DOMAIN17_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 2: /* DSC2 */
		REG_GET(DOMAIN18_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 3: /* DSC3 */
		REG_GET(DOMAIN19_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	/* PGFSM status 0 == power on, i.e. the DSC is ungated */
	return pwr_status == 0;
}
1486 | ||
1487 | void dcn32_update_dsc_pg(struct dc *dc, | |
1488 | struct dc_state *context, | |
1489 | bool safe_to_disable) | |
1490 | { | |
1491 | struct dce_hwseq *hws = dc->hwseq; | |
9e447c81 | 1492 | int i; |
1e939ea1 | 1493 | |
9e447c81 | 1494 | for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { |
1e939ea1 DV |
1495 | struct display_stream_compressor *dsc = dc->res_pool->dscs[i]; |
1496 | bool is_dsc_ungated = hws->funcs.dsc_pg_status(hws, dsc->inst); | |
1497 | ||
1498 | if (context->res_ctx.is_dsc_acquired[i]) { | |
1499 | if (!is_dsc_ungated) { | |
1500 | hws->funcs.dsc_pg_control(hws, dsc->inst, true); | |
1501 | } | |
1502 | } else if (safe_to_disable) { | |
1503 | if (is_dsc_ungated) { | |
1504 | hws->funcs.dsc_pg_control(hws, dsc->inst, false); | |
1505 | } | |
1506 | } | |
1507 | } | |
1508 | } | |
a5b50a0c | 1509 | |
6a068e64 AL |
1510 | void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context) |
1511 | { | |
1512 | struct dce_hwseq *hws = dc->hwseq; | |
1513 | int i; | |
1514 | ||
1515 | for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { | |
1516 | struct pipe_ctx *pipe_ctx_old = | |
1517 | &dc->current_state->res_ctx.pipe_ctx[i]; | |
1518 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | |
1519 | ||
1520 | if (!pipe_ctx_old->stream) | |
1521 | continue; | |
1522 | ||
1523 | if (dc_state_get_pipe_subvp_type(dc->current_state, pipe_ctx_old) != SUBVP_PHANTOM) | |
1524 | continue; | |
1525 | ||
1526 | if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) | |
1527 | continue; | |
1528 | ||
1529 | if (!pipe_ctx->stream || pipe_need_reprogram(pipe_ctx_old, pipe_ctx) || | |
1530 | (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)) { | |
1531 | struct clock_source *old_clk = pipe_ctx_old->clock_source; | |
1532 | ||
1533 | if (hws->funcs.reset_back_end_for_pipe) | |
1534 | hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); | |
1535 | if (hws->funcs.enable_stream_gating) | |
1536 | hws->funcs.enable_stream_gating(dc, pipe_ctx_old); | |
1537 | if (old_clk) | |
1538 | old_clk->funcs->cs_power_down(old_clk); | |
1539 | } | |
1540 | } | |
1541 | } | |
1542 | ||
a5b50a0c AL |
/* Bring up the back end for every new/changed phantom stream in @context.
 *
 * First pass: if an active non-phantom pipe is becoming phantom, wait for
 * its OTG to pass through VBLANK into VACTIVE so the pending double-buffer
 * update lands before any phantom programming.  Second pass: run the full
 * single-controller enable sequence on each phantom OTG master that is new
 * or needs reprogramming.
 */
void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
{
	unsigned int i;
	enum dc_status status = DC_OK;
	struct dce_hwseq *hws = dc->hwseq;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		/* If an active, non-phantom pipe is being transitioned into a phantom
		 * pipe, wait for the double buffer update to complete first before we do
		 * ANY phantom pipe programming.
		 */
		if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM &&
				old_pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) != SUBVP_PHANTOM) {
			old_pipe->stream_res.tg->funcs->wait_for_state(
					old_pipe->stream_res.tg,
					CRTC_STATE_VBLANK);
			old_pipe->stream_res.tg->funcs->wait_for_state(
					old_pipe->stream_res.tg,
					CRTC_STATE_VACTIVE);
		}
	}
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (pipe_ctx->stream == NULL)
			continue;

		/* Only phantom pipes are enabled here */
		if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
			continue;

		/* Same stream with a valid link state needs no re-enable */
		if (pipe_ctx->stream == pipe_ctx_old->stream &&
			pipe_ctx->stream->link->link_state_valid) {
			continue;
		}

		if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
			continue;

		/* Program from the OTG master only; secondary pipes follow it */
		if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe)
			continue;

		if (hws->funcs.apply_single_controller_ctx_to_hw)
			status = hws->funcs.apply_single_controller_ctx_to_hw(
					pipe_ctx,
					context,
					dc);

		ASSERT(status == DC_OK);

#ifdef CONFIG_DRM_AMD_DC_FP
		if (hws->funcs.resync_fifo_dccg_dio)
			hws->funcs.resync_fifo_dccg_dio(hws, dc, context, i);
#endif
	}
}
67d198da AL |
1603 | |
1604 | /* Blank pixel data during initialization */ | |
1605 | void dcn32_init_blank( | |
1606 | struct dc *dc, | |
1607 | struct timing_generator *tg) | |
1608 | { | |
1609 | struct dce_hwseq *hws = dc->hwseq; | |
1610 | enum dc_color_space color_space; | |
1611 | struct tg_color black_color = {0}; | |
1612 | struct output_pixel_processor *opp = NULL; | |
1613 | struct output_pixel_processor *bottom_opp = NULL; | |
1614 | uint32_t num_opps, opp_id_src0, opp_id_src1; | |
f851b078 | 1615 | uint32_t otg_active_width = 0, otg_active_height = 0; |
67d198da AL |
1616 | uint32_t i; |
1617 | ||
1618 | /* program opp dpg blank color */ | |
1619 | color_space = COLOR_SPACE_SRGB; | |
1620 | color_space_to_black_color(dc, color_space, &black_color); | |
1621 | ||
1622 | /* get the OTG active size */ | |
1623 | tg->funcs->get_otg_active_size(tg, | |
1624 | &otg_active_width, | |
1625 | &otg_active_height); | |
1626 | ||
1627 | /* get the OPTC source */ | |
1628 | tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1); | |
1629 | ||
1630 | if (opp_id_src0 >= dc->res_pool->res_cap->num_opp) { | |
1631 | ASSERT(false); | |
1632 | return; | |
1633 | } | |
1634 | ||
1635 | for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) { | |
1636 | if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) { | |
1637 | opp = dc->res_pool->opps[i]; | |
1638 | break; | |
1639 | } | |
1640 | } | |
1641 | ||
1642 | if (num_opps == 2) { | |
1643 | otg_active_width = otg_active_width / 2; | |
1644 | ||
1645 | if (opp_id_src1 >= dc->res_pool->res_cap->num_opp) { | |
1646 | ASSERT(false); | |
1647 | return; | |
1648 | } | |
1649 | for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) { | |
1650 | if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src1) { | |
1651 | bottom_opp = dc->res_pool->opps[i]; | |
1652 | break; | |
1653 | } | |
1654 | } | |
1655 | } | |
1656 | ||
1657 | if (opp && opp->funcs->opp_set_disp_pattern_generator) | |
1658 | opp->funcs->opp_set_disp_pattern_generator( | |
1659 | opp, | |
1660 | CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, | |
1661 | CONTROLLER_DP_COLOR_SPACE_UDEFINED, | |
1662 | COLOR_DEPTH_UNDEFINED, | |
1663 | &black_color, | |
1664 | otg_active_width, | |
1665 | otg_active_height, | |
1666 | 0); | |
1667 | ||
1668 | if (num_opps == 2) { | |
1669 | if (bottom_opp && bottom_opp->funcs->opp_set_disp_pattern_generator) { | |
1670 | bottom_opp->funcs->opp_set_disp_pattern_generator( | |
1671 | bottom_opp, | |
1672 | CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, | |
1673 | CONTROLLER_DP_COLOR_SPACE_UDEFINED, | |
1674 | COLOR_DEPTH_UNDEFINED, | |
1675 | &black_color, | |
1676 | otg_active_width, | |
1677 | otg_active_height, | |
1678 | 0); | |
1679 | hws->funcs.wait_for_blank_complete(bottom_opp); | |
1680 | } | |
1681 | } | |
1682 | ||
1683 | if (opp) | |
1684 | hws->funcs.wait_for_blank_complete(opp); | |
1685 | } | |
e87a6c5b AL |
1686 | |
1687 | void dcn32_blank_phantom(struct dc *dc, | |
1688 | struct timing_generator *tg, | |
1689 | int width, | |
1690 | int height) | |
1691 | { | |
1692 | struct dce_hwseq *hws = dc->hwseq; | |
1693 | enum dc_color_space color_space; | |
1694 | struct tg_color black_color = {0}; | |
1695 | struct output_pixel_processor *opp = NULL; | |
1696 | uint32_t num_opps, opp_id_src0, opp_id_src1; | |
1697 | uint32_t otg_active_width, otg_active_height; | |
1698 | uint32_t i; | |
1699 | ||
1700 | /* program opp dpg blank color */ | |
1701 | color_space = COLOR_SPACE_SRGB; | |
1702 | color_space_to_black_color(dc, color_space, &black_color); | |
1703 | ||
1704 | otg_active_width = width; | |
1705 | otg_active_height = height; | |
1706 | ||
1707 | /* get the OPTC source */ | |
1708 | tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1); | |
1709 | ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp); | |
1710 | ||
1711 | for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) { | |
1712 | if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) { | |
1713 | opp = dc->res_pool->opps[i]; | |
1714 | break; | |
1715 | } | |
1716 | } | |
1717 | ||
1718 | if (opp && opp->funcs->opp_set_disp_pattern_generator) | |
1719 | opp->funcs->opp_set_disp_pattern_generator( | |
1720 | opp, | |
1721 | CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, | |
1722 | CONTROLLER_DP_COLOR_SPACE_UDEFINED, | |
1723 | COLOR_DEPTH_UNDEFINED, | |
1724 | &black_color, | |
1725 | otg_active_width, | |
1726 | otg_active_height, | |
1727 | 0); | |
1728 | ||
1729 | if (tg->funcs->is_tg_enabled(tg)) | |
1730 | hws->funcs.wait_for_blank_complete(opp); | |
1731 | } | |
15c6798a | 1732 | |
cc299120 DV |
1733 | /* phantom stream id's can change often, but can be identical between contexts. |
1734 | * This function checks for the condition the streams are identical to avoid | |
1735 | * redundant pipe transitions. | |
1736 | */ | |
1737 | static bool is_subvp_phantom_topology_transition_seamless( | |
1738 | const struct dc_state *cur_ctx, | |
1739 | const struct dc_state *new_ctx, | |
1740 | const struct pipe_ctx *cur_pipe, | |
1741 | const struct pipe_ctx *new_pipe) | |
1742 | { | |
1743 | enum mall_stream_type cur_pipe_type = dc_state_get_pipe_subvp_type(cur_ctx, cur_pipe); | |
1744 | enum mall_stream_type new_pipe_type = dc_state_get_pipe_subvp_type(new_ctx, new_pipe); | |
1745 | ||
1746 | const struct dc_stream_state *cur_paired_stream = dc_state_get_paired_subvp_stream(cur_ctx, cur_pipe->stream); | |
1747 | const struct dc_stream_state *new_paired_stream = dc_state_get_paired_subvp_stream(new_ctx, new_pipe->stream); | |
1748 | ||
1749 | return cur_pipe_type == SUBVP_PHANTOM && | |
1750 | cur_pipe_type == new_pipe_type && | |
1751 | cur_paired_stream && new_paired_stream && | |
1752 | cur_paired_stream->stream_id == new_paired_stream->stream_id; | |
1753 | } | |
1754 | ||
15c6798a WL |
1755 | bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc, |
1756 | const struct dc_state *cur_ctx, | |
1757 | const struct dc_state *new_ctx) | |
1758 | { | |
1759 | int i; | |
1760 | const struct pipe_ctx *cur_pipe, *new_pipe; | |
1761 | bool is_seamless = true; | |
1762 | ||
1763 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | |
1764 | cur_pipe = &cur_ctx->res_ctx.pipe_ctx[i]; | |
1765 | new_pipe = &new_ctx->res_ctx.pipe_ctx[i]; | |
1766 | ||
1767 | if (resource_is_pipe_type(cur_pipe, FREE_PIPE) || | |
1768 | resource_is_pipe_type(new_pipe, FREE_PIPE)) | |
1769 | /* adding or removing free pipes is always seamless */ | |
1770 | continue; | |
1771 | else if (resource_is_pipe_type(cur_pipe, OTG_MASTER)) { | |
1772 | if (resource_is_pipe_type(new_pipe, OTG_MASTER)) | |
cc299120 DV |
1773 | if (cur_pipe->stream->stream_id == new_pipe->stream->stream_id || |
1774 | is_subvp_phantom_topology_transition_seamless(cur_ctx, new_ctx, cur_pipe, new_pipe)) | |
15c6798a WL |
1775 | /* OTG master with the same stream is seamless */ |
1776 | continue; | |
1777 | } else if (resource_is_pipe_type(cur_pipe, OPP_HEAD)) { | |
1778 | if (resource_is_pipe_type(new_pipe, OPP_HEAD)) { | |
1779 | if (cur_pipe->stream_res.tg == new_pipe->stream_res.tg) | |
1780 | /* | |
1781 | * OPP heads sharing the same timing | |
1782 | * generator is seamless | |
1783 | */ | |
1784 | continue; | |
1785 | } | |
1786 | } else if (resource_is_pipe_type(cur_pipe, DPP_PIPE)) { | |
1787 | if (resource_is_pipe_type(new_pipe, DPP_PIPE)) { | |
1788 | if (cur_pipe->stream_res.opp == new_pipe->stream_res.opp) | |
1789 | /* | |
1790 | * DPP pipes sharing the same OPP head is | |
1791 | * seamless | |
1792 | */ | |
1793 | continue; | |
1794 | } | |
1795 | } | |
1796 | ||
1797 | /* | |
1798 | * This pipe's transition doesn't fall under any seamless | |
1799 | * conditions | |
1800 | */ | |
1801 | is_seamless = false; | |
1802 | break; | |
1803 | } | |
1804 | ||
1805 | return is_seamless; | |
1806 | } | |
f583db81 AL |
1807 | |
1808 | void dcn32_prepare_bandwidth(struct dc *dc, | |
1809 | struct dc_state *context) | |
1810 | { | |
1811 | bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support; | |
1812 | /* Any transition into an FPO config should disable MCLK switching first to avoid | |
1813 | * driver and FW P-State synchronization issues. | |
1814 | */ | |
1815 | if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { | |
1816 | dc->optimized_required = true; | |
1817 | context->bw_ctx.bw.dcn.clk.p_state_change_support = false; | |
1818 | } | |
1819 | ||
1820 | if (dc->clk_mgr->dc_mode_softmax_enabled) | |
1821 | if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && | |
1822 | context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) | |
1823 | dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); | |
1824 | ||
1825 | dcn20_prepare_bandwidth(dc, context); | |
1826 | ||
1827 | if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) | |
1828 | dc_dmub_srv_p_state_delegate(dc, false, context); | |
1829 | ||
1830 | if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { | |
1831 | /* After disabling P-State, restore the original value to ensure we get the correct P-State | |
1832 | * on the next optimize. | |
1833 | */ | |
1834 | context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; | |
1835 | } | |
1836 | } | |
94040c2c WL |
1837 | |
1838 | void dcn32_interdependent_update_lock(struct dc *dc, | |
1839 | struct dc_state *context, bool lock) | |
1840 | { | |
1841 | unsigned int i; | |
1842 | struct pipe_ctx *pipe; | |
1843 | struct timing_generator *tg; | |
1844 | ||
1845 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | |
1846 | pipe = &context->res_ctx.pipe_ctx[i]; | |
1847 | tg = pipe->stream_res.tg; | |
1848 | ||
1849 | if (!resource_is_pipe_type(pipe, OTG_MASTER) || | |
1850 | !tg->funcs->is_tg_enabled(tg) || | |
1851 | dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) | |
1852 | continue; | |
1853 | ||
1854 | if (lock) | |
1855 | dc->hwss.pipe_control_lock(dc, pipe, true); | |
1856 | else | |
1857 | dc->hwss.pipe_control_lock(dc, pipe, false); | |
1858 | } | |
1859 | } | |
7a1eb668 DV |
1860 | |
1861 | void dcn32_program_outstanding_updates(struct dc *dc, | |
1862 | struct dc_state *context) | |
1863 | { | |
1864 | struct hubbub *hubbub = dc->res_pool->hubbub; | |
1865 | ||
1866 | /* update compbuf if required */ | |
1867 | if (hubbub->funcs->program_compbuf_size) | |
1868 | hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true); | |
1869 | } |