Commit | Line | Data |
---|---|---|
4c5e8b54 DL |
1 | /* |
2 | * Copyright 2018 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | * Authors: AMD | |
23 | * | |
24 | */ | |
25 | ||
84e7fc05 | 26 | #include "dcn10_clk_mgr.h" |
4c5e8b54 DL |
27 | |
28 | #include "reg_helper.h" | |
29 | #include "core_types.h" | |
30 | ||
84e7fc05 DL |
31 | #define TO_DCE_CLK_MGR(clocks)\ |
32 | container_of(clocks, struct dce_clk_mgr, base) | |
4c5e8b54 DL |
33 | |
34 | #define REG(reg) \ | |
84e7fc05 | 35 | (clk_mgr_dce->regs->reg) |
4c5e8b54 DL |
36 | |
37 | #undef FN | |
38 | #define FN(reg_name, field_name) \ | |
84e7fc05 | 39 | clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name |
4c5e8b54 DL |
40 | |
41 | #define CTX \ | |
84e7fc05 | 42 | clk_mgr_dce->base.ctx |
4c5e8b54 | 43 | #define DC_LOGGER \ |
84e7fc05 | 44 | clk_mgr->ctx->logger |
4c5e8b54 | 45 | |
84e7fc05 | 46 | static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks) |
4c5e8b54 DL |
47 | { |
48 | bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz; | |
84e7fc05 | 49 | bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz; |
4c5e8b54 | 50 | int disp_clk_threshold = new_clocks->max_supported_dppclk_khz; |
84e7fc05 | 51 | bool cur_dpp_div = clk_mgr->clks.dispclk_khz > clk_mgr->clks.dppclk_khz; |
4c5e8b54 DL |
52 | |
53 | /* increase clock, looking for div is 0 for current, request div is 1*/ | |
54 | if (dispclk_increase) { | |
55 | /* already divided by 2, no need to reach target clk with 2 steps*/ | |
56 | if (cur_dpp_div) | |
57 | return new_clocks->dispclk_khz; | |
58 | ||
59 | /* request disp clk is lower than maximum supported dpp clk, | |
60 | * no need to reach target clk with two steps. | |
61 | */ | |
62 | if (new_clocks->dispclk_khz <= disp_clk_threshold) | |
63 | return new_clocks->dispclk_khz; | |
64 | ||
65 | /* target dpp clk not request divided by 2, still within threshold */ | |
66 | if (!request_dpp_div) | |
67 | return new_clocks->dispclk_khz; | |
68 | ||
69 | } else { | |
70 | /* decrease clock, looking for current dppclk divided by 2, | |
71 | * request dppclk not divided by 2. | |
72 | */ | |
73 | ||
74 | /* current dpp clk not divided by 2, no need to ramp*/ | |
75 | if (!cur_dpp_div) | |
76 | return new_clocks->dispclk_khz; | |
77 | ||
78 | /* current disp clk is lower than current maximum dpp clk, | |
79 | * no need to ramp | |
80 | */ | |
84e7fc05 | 81 | if (clk_mgr->clks.dispclk_khz <= disp_clk_threshold) |
4c5e8b54 DL |
82 | return new_clocks->dispclk_khz; |
83 | ||
84 | /* request dpp clk need to be divided by 2 */ | |
85 | if (request_dpp_div) | |
86 | return new_clocks->dispclk_khz; | |
87 | } | |
88 | ||
89 | return disp_clk_threshold; | |
90 | } | |
91 | ||
/*
 * Program dispclk toward new_clocks->dispclk_khz while updating each active
 * pipe's DPP divide-by-2 control, stepping through the threshold returned by
 * dcn1_determine_dppclk_threshold() so dppclk stays within
 * new_clocks->max_supported_dppclk_khz during the transition.
 * Records the resulting dispclk/dppclk/max_supported_dppclk in clk_mgr->clks.
 */
static void dcn1_ramp_up_dispclk_with_dpp(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
{
	struct dc *dc = clk_mgr->ctx->dc;
	int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(clk_mgr, new_clocks);
	/* divider on means target dppclk is dispclk / 2 */
	bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
	int i;

	/* set disp clk to dpp clk threshold */
	dce112_set_clock(clk_mgr, dispclk_to_dpp_threshold);

	/* update request dpp clk division option */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		/* pipes without a plane have no DPP work to do */
		if (!pipe_ctx->plane_state)
			continue;

		pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
				pipe_ctx->plane_res.dpp,
				request_dpp_div,
				true);
	}

	/* If target clk not same as dppclk threshold, set to target clock */
	if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
		dce112_set_clock(clk_mgr, new_clocks->dispclk_khz);

	clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
	clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
	clk_mgr->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
}
123 | ||
e2bf2007 EY |
124 | static int get_active_display_cnt( |
125 | struct dc *dc, | |
126 | struct dc_state *context) | |
127 | { | |
128 | int i, display_count; | |
129 | ||
130 | display_count = 0; | |
131 | for (i = 0; i < context->stream_count; i++) { | |
132 | const struct dc_stream_state *stream = context->streams[i]; | |
133 | ||
134 | /* | |
135 | * Only notify active stream or virtual stream. | |
136 | * Need to notify virtual stream to work around | |
137 | * headless case. HPD does not fire when system is in | |
138 | * S0i2. | |
139 | */ | |
140 | if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL) | |
141 | display_count++; | |
142 | } | |
143 | ||
144 | return display_count; | |
145 | } | |
146 | ||
84e7fc05 | 147 | static void dcn1_update_clocks(struct clk_mgr *clk_mgr, |
4c5e8b54 DL |
148 | struct dc_state *context, |
149 | bool safe_to_lower) | |
150 | { | |
84e7fc05 | 151 | struct dc *dc = clk_mgr->ctx->dc; |
51ba137e | 152 | struct dc_debug_options *debug = &dc->debug; |
4c5e8b54 DL |
153 | struct dc_clocks *new_clocks = &context->bw.dcn.clk; |
154 | struct pp_smu_display_requirement_rv *smu_req_cur = | |
155 | &dc->res_pool->pp_smu_req; | |
156 | struct pp_smu_display_requirement_rv smu_req = *smu_req_cur; | |
a08ac5a6 | 157 | struct pp_smu_funcs_rv *pp_smu = NULL; |
4c5e8b54 DL |
158 | bool send_request_to_increase = false; |
159 | bool send_request_to_lower = false; | |
e2bf2007 EY |
160 | int display_count; |
161 | ||
162 | bool enter_display_off = false; | |
163 | ||
164 | display_count = get_active_display_cnt(dc, context); | |
a08ac5a6 CL |
165 | if (dc->res_pool->pp_smu) |
166 | pp_smu = &dc->res_pool->pp_smu->rv_funcs; | |
e2bf2007 EY |
167 | if (display_count == 0) |
168 | enter_display_off = true; | |
4c5e8b54 | 169 | |
e2bf2007 EY |
170 | if (enter_display_off == safe_to_lower) { |
171 | /* | |
172 | * Notify SMU active displays | |
173 | * if function pointer not set up, this message is | |
174 | * sent as part of pplib_apply_display_requirements. | |
175 | */ | |
a08ac5a6 | 176 | if (pp_smu && pp_smu->set_display_count) |
e2bf2007 | 177 | pp_smu->set_display_count(&pp_smu->pp_smu, display_count); |
e2bf2007 | 178 | |
4fd994c4 | 179 | smu_req.display_count = display_count; |
e2bf2007 | 180 | } |
4c5e8b54 | 181 | |
84e7fc05 DL |
182 | if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz |
183 | || new_clocks->phyclk_khz > clk_mgr->clks.phyclk_khz | |
184 | || new_clocks->fclk_khz > clk_mgr->clks.fclk_khz | |
185 | || new_clocks->dcfclk_khz > clk_mgr->clks.dcfclk_khz) | |
4c5e8b54 DL |
186 | send_request_to_increase = true; |
187 | ||
84e7fc05 DL |
188 | if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) { |
189 | clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz; | |
4c5e8b54 DL |
190 | |
191 | send_request_to_lower = true; | |
192 | } | |
193 | ||
98e90a34 | 194 | // F Clock |
51ba137e HH |
195 | if (debug->force_fclk_khz != 0) |
196 | new_clocks->fclk_khz = debug->force_fclk_khz; | |
197 | ||
84e7fc05 DL |
198 | if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) { |
199 | clk_mgr->clks.fclk_khz = new_clocks->fclk_khz; | |
ba7b267a | 200 | smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000; |
4c5e8b54 | 201 | |
4c5e8b54 DL |
202 | send_request_to_lower = true; |
203 | } | |
204 | ||
98e90a34 | 205 | //DCF Clock |
84e7fc05 DL |
206 | if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) { |
207 | clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz; | |
ba7b267a | 208 | smu_req.hard_min_dcefclk_mhz = new_clocks->dcfclk_khz / 1000; |
4c5e8b54 DL |
209 | |
210 | send_request_to_lower = true; | |
211 | } | |
212 | ||
213 | if (should_set_clock(safe_to_lower, | |
84e7fc05 DL |
214 | new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) { |
215 | clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; | |
4fd994c4 | 216 | smu_req.min_deep_sleep_dcefclk_mhz = (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000; |
4c5e8b54 DL |
217 | |
218 | send_request_to_lower = true; | |
219 | } | |
220 | ||
221 | /* make sure dcf clk is before dpp clk to | |
222 | * make sure we have enough voltage to run dpp clk | |
223 | */ | |
224 | if (send_request_to_increase) { | |
225 | /*use dcfclk to request voltage*/ | |
a08ac5a6 | 226 | if (pp_smu && pp_smu->set_hard_min_fclk_by_freq && |
4fd994c4 FD |
227 | pp_smu->set_hard_min_dcfclk_by_freq && |
228 | pp_smu->set_min_deep_sleep_dcfclk) { | |
229 | ||
230 | pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz); | |
231 | pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz); | |
232 | pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz); | |
4fd994c4 | 233 | } |
4c5e8b54 DL |
234 | } |
235 | ||
236 | /* dcn1 dppclk is tied to dispclk */ | |
237 | /* program dispclk on = as a w/a for sleep resume clock ramping issues */ | |
84e7fc05 DL |
238 | if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz) |
239 | || new_clocks->dispclk_khz == clk_mgr->clks.dispclk_khz) { | |
240 | dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks); | |
241 | clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz; | |
4c5e8b54 DL |
242 | |
243 | send_request_to_lower = true; | |
244 | } | |
245 | ||
246 | if (!send_request_to_increase && send_request_to_lower) { | |
247 | /*use dcfclk to request voltage*/ | |
a08ac5a6 | 248 | if (pp_smu && pp_smu->set_hard_min_fclk_by_freq && |
4fd994c4 FD |
249 | pp_smu->set_hard_min_dcfclk_by_freq && |
250 | pp_smu->set_min_deep_sleep_dcfclk) { | |
251 | ||
252 | pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz); | |
253 | pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz); | |
254 | pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz); | |
4fd994c4 | 255 | } |
4c5e8b54 DL |
256 | } |
257 | ||
4c5e8b54 DL |
258 | *smu_req_cur = smu_req; |
259 | } | |
/* DCN1.0 clk_mgr vtable; DP reference clock query is shared with DCE12. */
static const struct clk_mgr_funcs dcn1_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.update_clocks = dcn1_update_clocks
};
84e7fc05 | 264 | struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx) |
4c5e8b54 DL |
265 | { |
266 | struct dc_debug_options *debug = &ctx->dc->debug; | |
267 | struct dc_bios *bp = ctx->dc_bios; | |
268 | struct dc_firmware_info fw_info = { { 0 } }; | |
84e7fc05 | 269 | struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); |
4c5e8b54 | 270 | |
84e7fc05 | 271 | if (clk_mgr_dce == NULL) { |
4c5e8b54 DL |
272 | BREAK_TO_DEBUGGER(); |
273 | return NULL; | |
274 | } | |
275 | ||
84e7fc05 DL |
276 | clk_mgr_dce->base.ctx = ctx; |
277 | clk_mgr_dce->base.funcs = &dcn1_funcs; | |
4c5e8b54 | 278 | |
84e7fc05 | 279 | clk_mgr_dce->dfs_bypass_disp_clk = 0; |
4c5e8b54 | 280 | |
84e7fc05 DL |
281 | clk_mgr_dce->dprefclk_ss_percentage = 0; |
282 | clk_mgr_dce->dprefclk_ss_divider = 1000; | |
283 | clk_mgr_dce->ss_on_dprefclk = false; | |
4c5e8b54 | 284 | |
84e7fc05 | 285 | clk_mgr_dce->dprefclk_khz = 600000; |
4c5e8b54 | 286 | if (bp->integrated_info) |
84e7fc05 DL |
287 | clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; |
288 | if (clk_mgr_dce->dentist_vco_freq_khz == 0) { | |
4c5e8b54 | 289 | bp->funcs->get_firmware_info(bp, &fw_info); |
84e7fc05 DL |
290 | clk_mgr_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq; |
291 | if (clk_mgr_dce->dentist_vco_freq_khz == 0) | |
292 | clk_mgr_dce->dentist_vco_freq_khz = 3600000; | |
4c5e8b54 DL |
293 | } |
294 | ||
295 | if (!debug->disable_dfs_bypass && bp->integrated_info) | |
296 | if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE) | |
84e7fc05 | 297 | clk_mgr_dce->dfs_bypass_enabled = true; |
4c5e8b54 | 298 | |
84e7fc05 | 299 | dce_clock_read_ss_info(clk_mgr_dce); |
4c5e8b54 | 300 | |
84e7fc05 | 301 | return &clk_mgr_dce->base; |
4c5e8b54 | 302 | } |
e2101675 FD |
303 | |
304 |