Commit | Line | Data |
---|---|---|
9a70eba7 DL |
1 | /* |
2 | * Copyright 2012-16 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | * Authors: AMD | |
23 | * | |
24 | */ | |
25 | ||
84e7fc05 | 26 | #include "dce_clk_mgr.h" |
4c5e8b54 | 27 | |
9a70eba7 | 28 | #include "reg_helper.h" |
9f72f51d | 29 | #include "dmcu.h" |
fb3466a4 | 30 | #include "core_types.h" |
14a13a0e | 31 | #include "dal_asic_id.h" |
15a27de2 | 32 | |
/* Recover the containing struct dce_clk_mgr from its embedded clk_mgr base. */
#define TO_DCE_CLK_MGR(clocks)\
	container_of(clocks, struct dce_clk_mgr, base)

/* Register accessor used by the REG_* helpers below; requires a local
 * variable named 'clk_mgr_dce' to be in scope. */
#define REG(reg) \
	(clk_mgr_dce->regs->reg)

#undef FN
/* Expands to the (shift, mask) pair the reg_helper REG_GET/REG_SET macros
 * expect for a given register field; also requires 'clk_mgr_dce' in scope. */
#define FN(reg_name, field_name) \
	clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name

/* DC context handle consumed by the reg_helper macros. */
#define CTX \
	clk_mgr_dce->base.ctx
/* Logger for DC_LOG_* macros; note this one expects 'clk_mgr' in scope,
 * not 'clk_mgr_dce'. */
#define DC_LOGGER \
	clk_mgr->ctx->logger
9a70eba7 | 47 | |
/* Max clock values for each state indexed by "enum clocks_state":
 * entries are display clock / pixel clock ceilings in kHz. */
static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
/* ClocksStateInvalid - should not be used */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateLow */
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
/* ClocksStateNominal */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
/* ClocksStatePerformance */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
60 | ||
/* DCE 11.0 per-power-state clock ceilings (kHz), indexed by enum clocks_state. */
static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/*ClocksStateLow*/
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
72 | ||
/* DCE 11.2 per-power-state clock ceilings (kHz), indexed by enum clocks_state. */
static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
/*ClocksStateLow*/
{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
84 | ||
/* DCE 12.0 per-power-state clock ceilings (kHz), indexed by enum clocks_state. */
static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateLow*/
{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
2c8ad2d5 | 96 | |
261f3924 | 97 | int dentist_get_divider_from_did(int did) |
e11b86ad | 98 | { |
294c7e73 DL |
99 | if (did < DENTIST_BASE_DID_1) |
100 | did = DENTIST_BASE_DID_1; | |
101 | if (did > DENTIST_MAX_DID) | |
102 | did = DENTIST_MAX_DID; | |
103 | ||
104 | if (did < DENTIST_BASE_DID_2) { | |
105 | return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP | |
106 | * (did - DENTIST_BASE_DID_1); | |
107 | } else if (did < DENTIST_BASE_DID_3) { | |
108 | return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP | |
109 | * (did - DENTIST_BASE_DID_2); | |
39a3cd67 | 110 | } else if (did < DENTIST_BASE_DID_4) { |
294c7e73 DL |
111 | return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP |
112 | * (did - DENTIST_BASE_DID_3); | |
39a3cd67 DL |
113 | } else { |
114 | return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP | |
115 | * (did - DENTIST_BASE_DID_4); | |
e11b86ad | 116 | } |
e11b86ad DL |
117 | } |
118 | ||
/* SW will adjust DP REF Clock average value for all purposes
 * (DP DTO / DP Audio DTO and DP GTC)
 if clock is spread for all cases:
 -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
 calculations for DS_INCR/DS_MODULO (this is planned to be default case)
 -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
 calculations (not planned to be used, but average clock should still
 be valid)
 -if SS enabled on DP Ref clock and HW de-spreading disabled
 (should not be case with CIK) then SW should program all rates
 generated according to average value (case as with previous ASICs)
 */
/*
 * Apply the spread-spectrum down-spread correction to a DP reference clock.
 * Returns dp_ref_clk_khz scaled by (1 - ss_percentage) when SS is active on
 * DPREFCLK; otherwise returns the input unchanged.  The divide by 200 folds
 * together the percent-to-fraction conversion and the center-to-down-spread
 * halving used by the stored percentage format.
 */
static int clk_mgr_adjust_dp_ref_freq_for_ss(struct dce_clk_mgr *clk_mgr_dce, int dp_ref_clk_khz)
{
	if (clk_mgr_dce->ss_on_dprefclk && clk_mgr_dce->dprefclk_ss_divider != 0) {
		struct fixed31_32 ss_percentage = dc_fixpt_div_int(
				dc_fixpt_from_fraction(clk_mgr_dce->dprefclk_ss_percentage,
							clk_mgr_dce->dprefclk_ss_divider), 200);
		struct fixed31_32 adj_dp_ref_clk_khz;

		/* Scale the nominal clock down by the spread percentage. */
		ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
		adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
		dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
	}
	return dp_ref_clk_khz;
}
145 | ||
/*
 * Compute the current DP reference clock (kHz) from hardware state:
 * read the DENTIST DPREFCLK divider DID, convert it to a divider, derive
 * the DFS output from the VCO frequency, then apply the SS correction.
 */
static int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	int dprefclk_wdivider;
	int dprefclk_src_sel;
	int dp_ref_clk_khz = 600000;
	int target_div;

	/* ASSERT DP Reference Clock source is from DFS*/
	REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
	ASSERT(dprefclk_src_sel == 0);

	/* Read the mmDENTIST_DISPCLK_CNTL to get the currently
	 * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);

	/* Convert DENTIST_DPREFCLK_WDIVIDER to actual divider*/
	target_div = dentist_get_divider_from_did(dprefclk_wdivider);

	/* Calculate the current DFS clock, in kHz.*/
	dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
		* clk_mgr_dce->dentist_vco_freq_khz) / target_div;

	return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, dp_ref_clk_khz);
}
171 | ||
/*
 * DCE 12 variant: the DP reference clock is taken from the cached
 * dprefclk_khz value rather than read back from registers; only the
 * spread-spectrum correction is applied.
 */
int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);

	return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, clk_mgr_dce->dprefclk_khz);
}
178 | ||
179 | /* unit: in_khz before mode set, get pixel clock from context. ASIC register | |
180 | * may not be programmed yet | |
181 | */ | |
182 | static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context) | |
183 | { | |
184 | uint32_t max_pix_clk = 0; | |
185 | int i; | |
186 | ||
187 | for (i = 0; i < MAX_PIPES; i++) { | |
188 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | |
189 | ||
190 | if (pipe_ctx->stream == NULL) | |
191 | continue; | |
192 | ||
193 | /* do not check under lay */ | |
194 | if (pipe_ctx->top_pipe) | |
195 | continue; | |
196 | ||
380604e2 KC |
197 | if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 > max_pix_clk) |
198 | max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10; | |
24f7dd7e DL |
199 | |
200 | /* raise clock state for HBR3/2 if required. Confirmed with HW DCE/DPCS | |
201 | * logic for HBR3 still needs Nominal (0.8V) on VDDC rail | |
202 | */ | |
203 | if (dc_is_dp_signal(pipe_ctx->stream->signal) && | |
204 | pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk) | |
205 | max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk; | |
206 | } | |
207 | ||
208 | return max_pix_clk; | |
3f6d7435 | 209 | } |
3cdecd45 | 210 | |
/*
 * Pick the lowest power-state index whose display-clock and pixel-clock
 * ceilings still satisfy the requirements of @context.  Returns
 * DM_PP_CLOCKS_STATE_INVALID if even the maximum supported state cannot
 * cover the required display clock.
 */
static enum dm_pp_clocks_state dce_get_required_clocks_state(
	struct clk_mgr *clk_mgr,
	struct dc_state *context)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	int i;
	enum dm_pp_clocks_state low_req_clk;
	int max_pix_clk = get_max_pixel_clock_for_all_paths(context);

	/* Iterate from highest supported to lowest valid state, and update
	 * lowest RequiredState with the lowest state that satisfies
	 * all required clocks
	 */
	for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
		if (context->bw_ctx.bw.dce.dispclk_khz >
				clk_mgr_dce->max_clks_by_state[i].display_clk_khz
			|| max_pix_clk >
				clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz)
			break;

	/* The loop stops at the first state that is too small, so the state
	 * just above it is the lowest sufficient one. */
	low_req_clk = i + 1;
	if (low_req_clk > clk_mgr_dce->max_clks_state) {
		/* set max clock state for high phyclock, invalid on exceeding display clock */
		if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz
				< context->bw_ctx.bw.dce.dispclk_khz)
			low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
		else
			low_req_clk = clk_mgr_dce->max_clks_state;
	}

	return low_req_clk;
}
243 | ||
/*
 * Program the display engine clock via the VBIOS PLL interface and return
 * the clock actually applied (kHz), which may differ from the request when
 * DFS bypass substitutes a fixed clock.
 */
static int dce_set_clock(
	struct clk_mgr *clk_mgr,
	int requested_clk_khz)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
	struct dc_bios *bp = clk_mgr->ctx->dc_bios;
	int actual_clock = requested_clk_khz;
	struct dmcu *dmcu = clk_mgr_dce->base.ctx->dc->res_pool->dmcu;

	/* Make sure requested clock isn't lower than minimum threshold*/
	if (requested_clk_khz > 0)
		requested_clk_khz = max(requested_clk_khz,
				clk_mgr_dce->dentist_vco_freq_khz / 64);

	/* Prepare to program display clock*/
	pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
	pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;

	if (clk_mgr_dce->dfs_bypass_active)
		pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;

	bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);

	if (clk_mgr_dce->dfs_bypass_active) {
		/* Cache the fixed display clock*/
		clk_mgr_dce->dfs_bypass_disp_clk =
			pxl_clk_params.dfs_bypass_display_clock;
		actual_clock = pxl_clk_params.dfs_bypass_display_clock;
	}

	/* from power down, we need mark the clock state as ClocksStateNominal
	 * from HWReset, so when resume we will call pplib voltage regulator.*/
	if (requested_clk_khz == 0)
		clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;

	/* Retune the DMCU PSR wait loop to the new display clock. */
	if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
		dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);

	return actual_clock;
}
285 | ||
/*
 * DCE 11.2 display clock programming: sets the display clock through the
 * VBIOS SetDceClock interface, then re-issues the call for DPREFCLK (the
 * VBIOS chooses the DPREFCLK frequency itself).  Returns the clock the
 * VBIOS actually applied, in kHz.
 */
int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	struct bp_set_dce_clock_parameters dce_clk_params;
	struct dc_bios *bp = clk_mgr->ctx->dc_bios;
	struct dc *core_dc = clk_mgr->ctx->dc;
	struct dmcu *dmcu = core_dc->res_pool->dmcu;
	int actual_clock = requested_clk_khz;
	/* Prepare to program display clock*/
	memset(&dce_clk_params, 0, sizeof(dce_clk_params));

	/* Make sure requested clock isn't lower than minimum threshold*/
	if (requested_clk_khz > 0)
		requested_clk_khz = max(requested_clk_khz,
				clk_mgr_dce->dentist_vco_freq_khz / 62);

	dce_clk_params.target_clock_frequency = requested_clk_khz;
	dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
	dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;

	bp->funcs->set_dce_clock(bp, &dce_clk_params);
	/* VBIOS reports back the frequency it actually set. */
	actual_clock = dce_clk_params.target_clock_frequency;

	/* from power down, we need mark the clock state as ClocksStateNominal
	 * from HWReset, so when resume we will call pplib voltage regulator.*/
	if (requested_clk_khz == 0)
		clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;

	/*Program DP ref Clock*/
	/*VBIOS will determine DPREFCLK frequency, so we don't set it*/
	dce_clk_params.target_clock_frequency = 0;
	dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
	/* Vega20 does not use GENLOCK as the DPREFCLK source. */
	if (!ASICREV_IS_VEGA20_P(clk_mgr->ctx->asic_id.hw_internal_rev))
		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
			(dce_clk_params.pll_id ==
					CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
	else
		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;

	bp->funcs->set_dce_clock(bp, &dce_clk_params);

	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
		if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
			/* Only retune the PSR wait loop if the clock changed. */
			if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
				dmcu->funcs->set_psr_wait_loop(dmcu,
						actual_clock / 1000 / 7);
		}
	}

	clk_mgr_dce->dfs_bypass_disp_clk = actual_clock;
	return actual_clock;
}
338 | ||
/*
 * Populate clock-manager state from the VBIOS integrated info table:
 * the DENTIST VCO frequency (with firmware-info and hard-coded fallbacks),
 * the per-power-state maximum display clocks, and the DFS bypass capability.
 */
static void dce_clock_read_integrated_info(struct dce_clk_mgr *clk_mgr_dce)
{
	struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug;
	struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
	struct integrated_info info = { { { 0 } } };
	struct dc_firmware_info fw_info = { { 0 } };
	int i;

	if (bp->integrated_info)
		info = *bp->integrated_info;

	clk_mgr_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
	if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
		/* Integrated info missing or zero: fall back to firmware info,
		 * then to a 3.6 GHz default. */
		bp->funcs->get_firmware_info(bp, &fw_info);
		clk_mgr_dce->dentist_vco_freq_khz =
			fw_info.smu_gpu_pll_output_freq;
		if (clk_mgr_dce->dentist_vco_freq_khz == 0)
			clk_mgr_dce->dentist_vco_freq_khz = 3600000;
	}

	/*update the maximum display clock for each power state*/
	for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
		enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;

		/* Map voltage-table row index to the corresponding clock state. */
		switch (i) {
		case 0:
			clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
			break;

		case 1:
			clk_state = DM_PP_CLOCKS_STATE_LOW;
			break;

		case 2:
			clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
			break;

		case 3:
			clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
			break;

		default:
			clk_state = DM_PP_CLOCKS_STATE_INVALID;
			break;
		}

		/*Do not allow bad VBIOS/SBIOS to override with invalid values,
		 * check for > 100MHz*/
		if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
			clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz =
				info.disp_clk_voltage[i].max_supported_clk;
	}

	if (!debug->disable_dfs_bypass && bp->integrated_info)
		if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
			clk_mgr_dce->dfs_bypass_enabled = true;
}
396 | ||
/*
 * Read spread-spectrum settings for the DP reference clock from the VBIOS.
 * The GPU PLL entry is preferred; if it reports no spread, the DisplayPort
 * signal entry is tried as a fallback.  On success this records the SS
 * divider and (for down-spread only) the SS percentage on the clk manager.
 */
void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce)
{
	struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
	int ss_info_num = bp->funcs->get_ss_entry_number(
			bp, AS_SIGNAL_TYPE_GPU_PLL);

	if (ss_info_num) {
		struct spread_spectrum_info info = { { 0 } };
		enum bp_result result = bp->funcs->get_spread_spectrum_info(
				bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);

		/* Based on VBIOS, VBIOS will keep entry for GPU PLL SS
		 * even if SS not enabled and in that case
		 * SSInfo.spreadSpectrumPercentage !=0 would be sign
		 * that SS is enabled
		 */
		if (result == BP_RESULT_OK &&
				info.spread_spectrum_percentage != 0) {
			clk_mgr_dce->ss_on_dprefclk = true;
			clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;

			if (info.type.CENTER_MODE == 0) {
				/* TODO: Currently for DP Reference clock we
				 * need only SS percentage for
				 * downspread */
				clk_mgr_dce->dprefclk_ss_percentage =
						info.spread_spectrum_percentage;
			}

			/* GPU PLL entry valid: no need to consult the DP entry. */
			return;
		}

		result = bp->funcs->get_spread_spectrum_info(
				bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);

		/* Based on VBIOS, VBIOS will keep entry for DPREFCLK SS
		 * even if SS not enabled and in that case
		 * SSInfo.spreadSpectrumPercentage !=0 would be sign
		 * that SS is enabled
		 */
		if (result == BP_RESULT_OK &&
				info.spread_spectrum_percentage != 0) {
			clk_mgr_dce->ss_on_dprefclk = true;
			clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;

			if (info.type.CENTER_MODE == 0) {
				/* Currently for DP Reference clock we
				 * need only SS percentage for
				 * downspread */
				clk_mgr_dce->dprefclk_ss_percentage =
						info.spread_spectrum_percentage;
			}
		}
	}
}
452 | ||
/**
 * dce121_clock_patch_xgmi_ss_info() - Save XGMI spread spectrum info
 * @clk_mgr: clock manager base structure
 *
 * Reads from VBIOS the XGMI spread spectrum info and saves it within
 * the dce clock manager. This operation will overwrite the existing dprefclk
 * SS values if the vBIOS query succeeds. Otherwise, it does nothing. It also
 * sets the ->xgmi_enabled flag.
 */
void dce121_clock_patch_xgmi_ss_info(struct clk_mgr *clk_mgr)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	enum bp_result result;
	struct spread_spectrum_info info = { { 0 } };
	struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;

	/* Assume disabled until the VBIOS query proves otherwise. */
	clk_mgr_dce->xgmi_enabled = false;

	result = bp->funcs->get_spread_spectrum_info(bp, AS_SIGNAL_TYPE_XGMI,
						     0, &info);
	/* A non-zero SS percentage is the indicator that SS is enabled. */
	if (result == BP_RESULT_OK && info.spread_spectrum_percentage != 0) {
		clk_mgr_dce->xgmi_enabled = true;
		clk_mgr_dce->ss_on_dprefclk = true;
		clk_mgr_dce->dprefclk_ss_divider =
				info.spread_percentage_divider;

		if (info.type.CENTER_MODE == 0) {
			/* Currently for DP Reference clock we
			 * need only SS percentage for
			 * downspread */
			clk_mgr_dce->dprefclk_ss_percentage =
					info.spread_spectrum_percentage;
		}
	}
}
488 | ||
/*
 * Fill @pp_display_cfg->disp_configs with one entry per active (not
 * dpms-off) stream in @context, and set ->display_count.  Each entry
 * records the signal type, pipe index, source size, link settings and a
 * rounded refresh rate for the PPLib interface.
 */
void dce110_fill_display_configs(
	const struct dc_state *context,
	struct dm_pp_display_configuration *pp_display_cfg)
{
	int j;
	int num_cfgs = 0;

	for (j = 0; j < context->stream_count; j++) {
		int k;

		const struct dc_stream_state *stream = context->streams[j];
		/* cfg targets the next unused slot; it is only committed
		 * (num_cfgs incremented) for active streams below. */
		struct dm_pp_single_disp_config *cfg =
			&pp_display_cfg->disp_configs[num_cfgs];
		const struct pipe_ctx *pipe_ctx = NULL;

		/* Find the pipe driving this stream. */
		for (k = 0; k < MAX_PIPES; k++)
			if (stream == context->res_ctx.pipe_ctx[k].stream) {
				pipe_ctx = &context->res_ctx.pipe_ctx[k];
				break;
			}

		ASSERT(pipe_ctx != NULL);

		/* only notify active stream */
		if (stream->dpms_off)
			continue;

		num_cfgs++;
		cfg->signal = pipe_ctx->stream->signal;
		cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
		cfg->src_height = stream->src.height;
		cfg->src_width = stream->src.width;
		cfg->ddi_channel_mapping =
			stream->link->ddi_channel_mapping.raw;
		cfg->transmitter =
			stream->link->link_enc->transmitter;
		cfg->link_settings.lane_count =
			stream->link->cur_link_settings.lane_count;
		cfg->link_settings.link_rate =
			stream->link->cur_link_settings.link_rate;
		cfg->link_settings.link_spread =
			stream->link->cur_link_settings.link_spread;
		cfg->sym_clock = stream->phy_pix_clk;
		/* Round v_refresh: pix_clk is in 100 Hz units, so *100 gives Hz. */
		cfg->v_refresh = stream->timing.pix_clk_100hz * 100;
		cfg->v_refresh /= stream->timing.h_total;
		cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
							/ stream->timing.v_total;
	}

	pp_display_cfg->display_count = num_cfgs;
}
fd8cc371 | 541 | |
24f7dd7e DL |
542 | static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context) |
543 | { | |
544 | uint8_t j; | |
545 | uint32_t min_vertical_blank_time = -1; | |
546 | ||
547 | for (j = 0; j < context->stream_count; j++) { | |
548 | struct dc_stream_state *stream = context->streams[j]; | |
549 | uint32_t vertical_blank_in_pixels = 0; | |
550 | uint32_t vertical_blank_time = 0; | |
551 | ||
552 | vertical_blank_in_pixels = stream->timing.h_total * | |
553 | (stream->timing.v_total | |
554 | - stream->timing.v_addressable); | |
555 | ||
556 | vertical_blank_time = vertical_blank_in_pixels | |
380604e2 | 557 | * 10000 / stream->timing.pix_clk_100hz; |
24f7dd7e DL |
558 | |
559 | if (min_vertical_blank_time > vertical_blank_time) | |
560 | min_vertical_blank_time = vertical_blank_time; | |
561 | } | |
562 | ||
563 | return min_vertical_blank_time; | |
564 | } | |
565 | ||
566 | static int determine_sclk_from_bounding_box( | |
567 | const struct dc *dc, | |
568 | int required_sclk) | |
569 | { | |
570 | int i; | |
571 | ||
572 | /* | |
573 | * Some asics do not give us sclk levels, so we just report the actual | |
574 | * required sclk | |
575 | */ | |
576 | if (dc->sclk_lvls.num_levels == 0) | |
577 | return required_sclk; | |
578 | ||
579 | for (i = 0; i < dc->sclk_lvls.num_levels; i++) { | |
580 | if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk) | |
581 | return dc->sclk_lvls.clocks_in_khz[i]; | |
582 | } | |
583 | /* | |
584 | * even maximum level could not satisfy requirement, this | |
585 | * is unexpected at this stage, should have been caught at | |
586 | * validation time | |
587 | */ | |
588 | ASSERT(0); | |
589 | return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1]; | |
590 | } | |
591 | ||
/*
 * Build the PPLib display configuration for @context and forward it to
 * PPLib, but only when it differs from the currently applied configuration
 * to avoid redundant notifications.
 */
static void dce_pplib_apply_display_requirements(
	struct dc *dc,
	struct dc_state *context)
{
	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;

	/* Memory-clock switching must fit inside the shortest vblank. */
	pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);

	dce110_fill_display_configs(context, pp_display_cfg);

	if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
}
605 | ||
/*
 * DCE 11 variant of the PPLib notification: in addition to the display
 * configs, translates the bandwidth-calculation results (sync state,
 * p-state/cc6 gating, memory/engine clock minimums) into the PPLib
 * structure, then applies it only if it changed.
 */
static void dce11_pplib_apply_display_requirements(
	struct dc *dc,
	struct dc_state *context)
{
	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;

	pp_display_cfg->all_displays_in_sync =
		context->bw_ctx.bw.dce.all_displays_in_sync;
	pp_display_cfg->nb_pstate_switch_disable =
		context->bw_ctx.bw.dce.nbp_state_change_enable == false;
	pp_display_cfg->cpu_cc6_disable =
		context->bw_ctx.bw.dce.cpuc_state_change_enable == false;
	pp_display_cfg->cpu_pstate_disable =
		context->bw_ctx.bw.dce.cpup_state_change_enable == false;
	pp_display_cfg->cpu_pstate_separation_time =
		context->bw_ctx.bw.dce.blackout_recovery_time_us;

	pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz
		/ MEMORY_TYPE_MULTIPLIER_CZ;

	pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
			dc,
			context->bw_ctx.bw.dce.sclk_khz);

	/*
	 * As workaround for >4x4K lightup set dcfclock to min_engine_clock value.
	 * This is not required for less than 5 displays,
	 * thus don't request decfclk in dc to avoid impact
	 * on power saving.
	 *
	 */
	pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4)?
			pp_display_cfg->min_engine_clock_khz : 0;

	pp_display_cfg->min_engine_clock_deep_sleep_khz
			= context->bw_ctx.bw.dce.sclk_deep_sleep_khz;

	pp_display_cfg->avail_mclk_switch_time_us =
						dce110_get_min_vblank_time_us(context);
	/* TODO: dce11.2*/
	pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;

	pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;

	dce110_fill_display_configs(context, pp_display_cfg);

	/* TODO: is this still applicable?*/
	if (pp_display_cfg->display_count == 1) {
		const struct dc_crtc_timing *timing =
			&context->streams[0]->timing;

		pp_display_cfg->crtc_index =
			pp_display_cfg->disp_configs[0].pipe_idx;
		pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz;
	}

	/* Skip the PPLib call when nothing changed since the last apply. */
	if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
}
665 | ||
/*
 * DCE clock-update entry point: requests the needed PPLib power level,
 * reprograms the display clock if it changed (respecting @safe_to_lower),
 * and notifies PPLib of the new display requirements.
 */
static void dce_update_clocks(struct clk_mgr *clk_mgr,
			struct dc_state *context,
			bool safe_to_lower)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	struct dm_pp_power_level_change_request level_change_req;
	int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;

	/*TODO: W/A for dal3 linux, investigate why this works */
	if (!clk_mgr_dce->dfs_bypass_active)
		patched_disp_clk = patched_disp_clk * 115 / 100;

	level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
	/* get max clock state from PPLIB */
	if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
			|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
		if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
			clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
	}

	/* Only touch the PLL when raising, or when lowering is allowed. */
	if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
		patched_disp_clk = dce_set_clock(clk_mgr, patched_disp_clk);
		clk_mgr->clks.dispclk_khz = patched_disp_clk;
	}
	dce_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
}
692 | ||
/*
 * DCE 11 clock-update entry point: same flow as dce_update_clocks, but
 * writes the applied clock back into the context's bandwidth results and
 * uses the DCE 11 PPLib notification path.
 */
static void dce11_update_clocks(struct clk_mgr *clk_mgr,
			struct dc_state *context,
			bool safe_to_lower)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	struct dm_pp_power_level_change_request level_change_req;
	int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;

	/*TODO: W/A for dal3 linux, investigate why this works */
	if (!clk_mgr_dce->dfs_bypass_active)
		patched_disp_clk = patched_disp_clk * 115 / 100;

	level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
	/* get max clock state from PPLIB */
	if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
			|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
		if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
			clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
	}

	/* Only touch the PLL when raising, or when lowering is allowed. */
	if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
		context->bw_ctx.bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
		clk_mgr->clks.dispclk_khz = patched_disp_clk;
	}
	dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
}
719 | ||
84e7fc05 | 720 | static void dce112_update_clocks(struct clk_mgr *clk_mgr, |
24f7dd7e DL |
721 | struct dc_state *context, |
722 | bool safe_to_lower) | |
723 | { | |
84e7fc05 | 724 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); |
24f7dd7e | 725 | struct dm_pp_power_level_change_request level_change_req; |
813d20dc | 726 | int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; |
8cd61c31 RL |
727 | |
728 | /*TODO: W/A for dal3 linux, investigate why this works */ | |
729 | if (!clk_mgr_dce->dfs_bypass_active) | |
8d25a560 | 730 | patched_disp_clk = patched_disp_clk * 115 / 100; |
24f7dd7e | 731 | |
84e7fc05 | 732 | level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); |
24f7dd7e | 733 | /* get max clock state from PPLIB */ |
84e7fc05 DL |
734 | if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower) |
735 | || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) { | |
736 | if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req)) | |
737 | clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; | |
fab55d61 | 738 | } |
24f7dd7e | 739 | |
8d25a560 LL |
740 | if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) { |
741 | patched_disp_clk = dce112_set_clock(clk_mgr, patched_disp_clk); | |
742 | clk_mgr->clks.dispclk_khz = patched_disp_clk; | |
24f7dd7e | 743 | } |
84e7fc05 | 744 | dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); |
2c8ad2d5 AD |
745 | } |
746 | ||
84e7fc05 | 747 | static void dce12_update_clocks(struct clk_mgr *clk_mgr, |
24f7dd7e DL |
748 | struct dc_state *context, |
749 | bool safe_to_lower) | |
5a83c932 | 750 | { |
84e7fc05 | 751 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); |
24f7dd7e DL |
752 | struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; |
753 | int max_pix_clk = get_max_pixel_clock_for_all_paths(context); | |
813d20dc | 754 | int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; |
24f7dd7e | 755 | |
4244381c | 756 | /*TODO: W/A for dal3 linux, investigate why this works */ |
84e7fc05 | 757 | if (!clk_mgr_dce->dfs_bypass_active) |
8d25a560 | 758 | patched_disp_clk = patched_disp_clk * 115 / 100; |
24f7dd7e | 759 | |
8d25a560 | 760 | if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) { |
24f7dd7e | 761 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK; |
508f5fcb LL |
762 | /* |
763 | * When xGMI is enabled, the display clk needs to be adjusted | |
764 | * with the WAFL link's SS percentage. | |
765 | */ | |
766 | if (clk_mgr_dce->xgmi_enabled) | |
767 | patched_disp_clk = clk_mgr_adjust_dp_ref_freq_for_ss( | |
768 | clk_mgr_dce, patched_disp_clk); | |
8d25a560 LL |
769 | clock_voltage_req.clocks_in_khz = patched_disp_clk; |
770 | clk_mgr->clks.dispclk_khz = dce112_set_clock(clk_mgr, patched_disp_clk); | |
24f7dd7e | 771 | |
84e7fc05 | 772 | dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req); |
24f7dd7e DL |
773 | } |
774 | ||
84e7fc05 | 775 | if (should_set_clock(safe_to_lower, max_pix_clk, clk_mgr->clks.phyclk_khz)) { |
24f7dd7e DL |
776 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK; |
777 | clock_voltage_req.clocks_in_khz = max_pix_clk; | |
84e7fc05 | 778 | clk_mgr->clks.phyclk_khz = max_pix_clk; |
24f7dd7e | 779 | |
84e7fc05 | 780 | dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req); |
24f7dd7e | 781 | } |
84e7fc05 | 782 | dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); |
5a83c932 NK |
783 | } |
784 | ||
84e7fc05 | 785 | static const struct clk_mgr_funcs dce120_funcs = { |
3cdecd45 | 786 | .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, |
fab55d61 | 787 | .update_clocks = dce12_update_clocks |
2c8ad2d5 | 788 | }; |
2c8ad2d5 | 789 | |
84e7fc05 | 790 | static const struct clk_mgr_funcs dce112_funcs = { |
3cdecd45 | 791 | .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, |
24f7dd7e | 792 | .update_clocks = dce112_update_clocks |
9a70eba7 DL |
793 | }; |
794 | ||
84e7fc05 | 795 | static const struct clk_mgr_funcs dce110_funcs = { |
3cdecd45 | 796 | .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, |
24f7dd7e | 797 | .update_clocks = dce11_update_clocks, |
9a70eba7 DL |
798 | }; |
799 | ||
84e7fc05 | 800 | static const struct clk_mgr_funcs dce_funcs = { |
3cdecd45 | 801 | .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, |
fab55d61 | 802 | .update_clocks = dce_update_clocks |
9a70eba7 DL |
803 | }; |
804 | ||
84e7fc05 DL |
805 | static void dce_clk_mgr_construct( |
806 | struct dce_clk_mgr *clk_mgr_dce, | |
9a70eba7 | 807 | struct dc_context *ctx, |
84e7fc05 DL |
808 | const struct clk_mgr_registers *regs, |
809 | const struct clk_mgr_shift *clk_shift, | |
810 | const struct clk_mgr_mask *clk_mask) | |
9a70eba7 | 811 | { |
84e7fc05 | 812 | struct clk_mgr *base = &clk_mgr_dce->base; |
4244381c | 813 | struct dm_pp_static_clock_info static_clk_info = {0}; |
9a70eba7 DL |
814 | |
815 | base->ctx = ctx; | |
816 | base->funcs = &dce_funcs; | |
817 | ||
84e7fc05 DL |
818 | clk_mgr_dce->regs = regs; |
819 | clk_mgr_dce->clk_mgr_shift = clk_shift; | |
820 | clk_mgr_dce->clk_mgr_mask = clk_mask; | |
9a70eba7 | 821 | |
84e7fc05 | 822 | clk_mgr_dce->dfs_bypass_disp_clk = 0; |
7d091f7a | 823 | |
84e7fc05 DL |
824 | clk_mgr_dce->dprefclk_ss_percentage = 0; |
825 | clk_mgr_dce->dprefclk_ss_divider = 1000; | |
826 | clk_mgr_dce->ss_on_dprefclk = false; | |
7d091f7a | 827 | |
4244381c DL |
828 | |
829 | if (dm_pp_get_static_clocks(ctx, &static_clk_info)) | |
84e7fc05 | 830 | clk_mgr_dce->max_clks_state = static_clk_info.max_clocks_state; |
4244381c | 831 | else |
84e7fc05 DL |
832 | clk_mgr_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; |
833 | clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID; | |
9a70eba7 | 834 | |
84e7fc05 DL |
835 | dce_clock_read_integrated_info(clk_mgr_dce); |
836 | dce_clock_read_ss_info(clk_mgr_dce); | |
9a70eba7 DL |
837 | } |
838 | ||
84e7fc05 | 839 | struct clk_mgr *dce_clk_mgr_create( |
9a70eba7 | 840 | struct dc_context *ctx, |
84e7fc05 DL |
841 | const struct clk_mgr_registers *regs, |
842 | const struct clk_mgr_shift *clk_shift, | |
843 | const struct clk_mgr_mask *clk_mask) | |
9a70eba7 | 844 | { |
84e7fc05 | 845 | struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); |
9a70eba7 | 846 | |
84e7fc05 | 847 | if (clk_mgr_dce == NULL) { |
9a70eba7 DL |
848 | BREAK_TO_DEBUGGER(); |
849 | return NULL; | |
850 | } | |
851 | ||
84e7fc05 | 852 | memcpy(clk_mgr_dce->max_clks_by_state, |
e11b86ad DL |
853 | dce80_max_clks_by_state, |
854 | sizeof(dce80_max_clks_by_state)); | |
855 | ||
84e7fc05 DL |
856 | dce_clk_mgr_construct( |
857 | clk_mgr_dce, ctx, regs, clk_shift, clk_mask); | |
9a70eba7 | 858 | |
84e7fc05 | 859 | return &clk_mgr_dce->base; |
9a70eba7 DL |
860 | } |
861 | ||
84e7fc05 | 862 | struct clk_mgr *dce110_clk_mgr_create( |
9a70eba7 | 863 | struct dc_context *ctx, |
84e7fc05 DL |
864 | const struct clk_mgr_registers *regs, |
865 | const struct clk_mgr_shift *clk_shift, | |
866 | const struct clk_mgr_mask *clk_mask) | |
9a70eba7 | 867 | { |
84e7fc05 | 868 | struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); |
9a70eba7 | 869 | |
84e7fc05 | 870 | if (clk_mgr_dce == NULL) { |
9a70eba7 DL |
871 | BREAK_TO_DEBUGGER(); |
872 | return NULL; | |
873 | } | |
874 | ||
84e7fc05 | 875 | memcpy(clk_mgr_dce->max_clks_by_state, |
e11b86ad DL |
876 | dce110_max_clks_by_state, |
877 | sizeof(dce110_max_clks_by_state)); | |
878 | ||
84e7fc05 DL |
879 | dce_clk_mgr_construct( |
880 | clk_mgr_dce, ctx, regs, clk_shift, clk_mask); | |
9a70eba7 | 881 | |
84e7fc05 | 882 | clk_mgr_dce->base.funcs = &dce110_funcs; |
9a70eba7 | 883 | |
84e7fc05 | 884 | return &clk_mgr_dce->base; |
9a70eba7 DL |
885 | } |
886 | ||
84e7fc05 | 887 | struct clk_mgr *dce112_clk_mgr_create( |
9a70eba7 | 888 | struct dc_context *ctx, |
84e7fc05 DL |
889 | const struct clk_mgr_registers *regs, |
890 | const struct clk_mgr_shift *clk_shift, | |
891 | const struct clk_mgr_mask *clk_mask) | |
9a70eba7 | 892 | { |
84e7fc05 | 893 | struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); |
9a70eba7 | 894 | |
84e7fc05 | 895 | if (clk_mgr_dce == NULL) { |
9a70eba7 DL |
896 | BREAK_TO_DEBUGGER(); |
897 | return NULL; | |
898 | } | |
899 | ||
84e7fc05 | 900 | memcpy(clk_mgr_dce->max_clks_by_state, |
e11b86ad DL |
901 | dce112_max_clks_by_state, |
902 | sizeof(dce112_max_clks_by_state)); | |
903 | ||
84e7fc05 DL |
904 | dce_clk_mgr_construct( |
905 | clk_mgr_dce, ctx, regs, clk_shift, clk_mask); | |
9a70eba7 | 906 | |
84e7fc05 | 907 | clk_mgr_dce->base.funcs = &dce112_funcs; |
9a70eba7 | 908 | |
84e7fc05 | 909 | return &clk_mgr_dce->base; |
9a70eba7 DL |
910 | } |
911 | ||
84e7fc05 | 912 | struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx) |
2c8ad2d5 | 913 | { |
84e7fc05 | 914 | struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); |
2c8ad2d5 | 915 | |
84e7fc05 | 916 | if (clk_mgr_dce == NULL) { |
2c8ad2d5 AD |
917 | BREAK_TO_DEBUGGER(); |
918 | return NULL; | |
919 | } | |
920 | ||
84e7fc05 | 921 | memcpy(clk_mgr_dce->max_clks_by_state, |
2c8ad2d5 AD |
922 | dce120_max_clks_by_state, |
923 | sizeof(dce120_max_clks_by_state)); | |
924 | ||
84e7fc05 DL |
925 | dce_clk_mgr_construct( |
926 | clk_mgr_dce, ctx, NULL, NULL, NULL); | |
2c8ad2d5 | 927 | |
84e7fc05 DL |
928 | clk_mgr_dce->dprefclk_khz = 600000; |
929 | clk_mgr_dce->base.funcs = &dce120_funcs; | |
2c8ad2d5 | 930 | |
84e7fc05 | 931 | return &clk_mgr_dce->base; |
fab55d61 DL |
932 | } |
933 | ||
508f5fcb LL |
934 | struct clk_mgr *dce121_clk_mgr_create(struct dc_context *ctx) |
935 | { | |
936 | struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), | |
937 | GFP_KERNEL); | |
938 | ||
939 | if (clk_mgr_dce == NULL) { | |
940 | BREAK_TO_DEBUGGER(); | |
941 | return NULL; | |
942 | } | |
943 | ||
944 | memcpy(clk_mgr_dce->max_clks_by_state, dce120_max_clks_by_state, | |
945 | sizeof(dce120_max_clks_by_state)); | |
946 | ||
947 | dce_clk_mgr_construct(clk_mgr_dce, ctx, NULL, NULL, NULL); | |
948 | ||
949 | clk_mgr_dce->dprefclk_khz = 625000; | |
950 | clk_mgr_dce->base.funcs = &dce120_funcs; | |
951 | ||
952 | return &clk_mgr_dce->base; | |
953 | } | |
954 | ||
84e7fc05 | 955 | void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr) |
9a70eba7 | 956 | { |
84e7fc05 | 957 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(*clk_mgr); |
9a70eba7 | 958 | |
84e7fc05 DL |
959 | kfree(clk_mgr_dce); |
960 | *clk_mgr = NULL; | |
9a70eba7 | 961 | } |