Commit | Line | Data |
---|---|---|
f7c1ed34 ML |
1 | /* |
2 | * Copyright 2018 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | * Authors: AMD | |
23 | */ | |
24 | #include <linux/string.h> | |
25 | #include <linux/acpi.h> | |
26 | ||
27 | #include <drm/drmP.h> | |
28 | #include <drm/drm_crtc_helper.h> | |
29 | #include <drm/amdgpu_drm.h> | |
30 | #include "dm_services.h" | |
31 | #include "amdgpu.h" | |
32 | #include "amdgpu_dm.h" | |
33 | #include "amdgpu_dm_irq.h" | |
34 | #include "amdgpu_pm.h" | |
35 | #include "dm_pp_smu.h" | |
36 | #include "../../powerplay/inc/hwmgr.h" | |
f7c1ed34 ML |
37 | |
38 | ||
39 | bool dm_pp_apply_display_requirements( | |
40 | const struct dc_context *ctx, | |
41 | const struct dm_pp_display_configuration *pp_display_cfg) | |
42 | { | |
43 | struct amdgpu_device *adev = ctx->driver_context; | |
d4d5eace | 44 | int i; |
f7c1ed34 ML |
45 | |
46 | if (adev->pm.dpm_enabled) { | |
47 | ||
48 | memset(&adev->pm.pm_display_cfg, 0, | |
49 | sizeof(adev->pm.pm_display_cfg)); | |
50 | ||
51 | adev->pm.pm_display_cfg.cpu_cc6_disable = | |
52 | pp_display_cfg->cpu_cc6_disable; | |
53 | ||
54 | adev->pm.pm_display_cfg.cpu_pstate_disable = | |
55 | pp_display_cfg->cpu_pstate_disable; | |
56 | ||
57 | adev->pm.pm_display_cfg.cpu_pstate_separation_time = | |
58 | pp_display_cfg->cpu_pstate_separation_time; | |
59 | ||
60 | adev->pm.pm_display_cfg.nb_pstate_switch_disable = | |
61 | pp_display_cfg->nb_pstate_switch_disable; | |
62 | ||
63 | adev->pm.pm_display_cfg.num_display = | |
64 | pp_display_cfg->display_count; | |
65 | adev->pm.pm_display_cfg.num_path_including_non_display = | |
66 | pp_display_cfg->display_count; | |
67 | ||
68 | adev->pm.pm_display_cfg.min_core_set_clock = | |
69 | pp_display_cfg->min_engine_clock_khz/10; | |
70 | adev->pm.pm_display_cfg.min_core_set_clock_in_sr = | |
71 | pp_display_cfg->min_engine_clock_deep_sleep_khz/10; | |
72 | adev->pm.pm_display_cfg.min_mem_set_clock = | |
73 | pp_display_cfg->min_memory_clock_khz/10; | |
74 | ||
3180fb67 | 75 | adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk = |
76 | pp_display_cfg->min_engine_clock_deep_sleep_khz/10; | |
77 | adev->pm.pm_display_cfg.min_dcef_set_clk = | |
78 | pp_display_cfg->min_dcfclock_khz/10; | |
79 | ||
f7c1ed34 ML |
80 | adev->pm.pm_display_cfg.multi_monitor_in_sync = |
81 | pp_display_cfg->all_displays_in_sync; | |
82 | adev->pm.pm_display_cfg.min_vblank_time = | |
83 | pp_display_cfg->avail_mclk_switch_time_us; | |
84 | ||
85 | adev->pm.pm_display_cfg.display_clk = | |
86 | pp_display_cfg->disp_clk_khz/10; | |
87 | ||
88 | adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency = | |
89 | pp_display_cfg->avail_mclk_switch_time_in_disp_active_us; | |
90 | ||
91 | adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index; | |
92 | adev->pm.pm_display_cfg.line_time_in_us = | |
93 | pp_display_cfg->line_time_in_us; | |
94 | ||
95 | adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh; | |
96 | adev->pm.pm_display_cfg.crossfire_display_index = -1; | |
97 | adev->pm.pm_display_cfg.min_bus_bandwidth = 0; | |
98 | ||
d4d5eace | 99 | for (i = 0; i < pp_display_cfg->display_count; i++) { |
100 | const struct dm_pp_single_disp_config *dc_cfg = | |
101 | &pp_display_cfg->disp_configs[i]; | |
102 | adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1; | |
103 | } | |
104 | ||
f7c1ed34 ML |
105 | /* TODO: complete implementation of |
106 | * pp_display_configuration_change(). | |
107 | * Follow example of: | |
108 | * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c | |
109 | * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */ | |
110 | if (adev->powerplay.pp_funcs->display_configuration_change) | |
111 | adev->powerplay.pp_funcs->display_configuration_change( | |
112 | adev->powerplay.pp_handle, | |
113 | &adev->pm.pm_display_cfg); | |
114 | ||
115 | /* TODO: replace by a separate call to 'apply display cfg'? */ | |
116 | amdgpu_pm_compute_clocks(adev); | |
117 | } | |
118 | ||
119 | return true; | |
120 | } | |
121 | ||
122 | static void get_default_clock_levels( | |
123 | enum dm_pp_clock_type clk_type, | |
124 | struct dm_pp_clock_levels *clks) | |
125 | { | |
126 | uint32_t disp_clks_in_khz[6] = { | |
127 | 300000, 400000, 496560, 626090, 685720, 757900 }; | |
128 | uint32_t sclks_in_khz[6] = { | |
129 | 300000, 360000, 423530, 514290, 626090, 720000 }; | |
130 | uint32_t mclks_in_khz[2] = { 333000, 800000 }; | |
131 | ||
132 | switch (clk_type) { | |
133 | case DM_PP_CLOCK_TYPE_DISPLAY_CLK: | |
134 | clks->num_levels = 6; | |
135 | memmove(clks->clocks_in_khz, disp_clks_in_khz, | |
136 | sizeof(disp_clks_in_khz)); | |
137 | break; | |
138 | case DM_PP_CLOCK_TYPE_ENGINE_CLK: | |
139 | clks->num_levels = 6; | |
140 | memmove(clks->clocks_in_khz, sclks_in_khz, | |
141 | sizeof(sclks_in_khz)); | |
142 | break; | |
143 | case DM_PP_CLOCK_TYPE_MEMORY_CLK: | |
144 | clks->num_levels = 2; | |
145 | memmove(clks->clocks_in_khz, mclks_in_khz, | |
146 | sizeof(mclks_in_khz)); | |
147 | break; | |
148 | default: | |
149 | clks->num_levels = 0; | |
150 | break; | |
151 | } | |
152 | } | |
153 | ||
154 | static enum amd_pp_clock_type dc_to_pp_clock_type( | |
155 | enum dm_pp_clock_type dm_pp_clk_type) | |
156 | { | |
157 | enum amd_pp_clock_type amd_pp_clk_type = 0; | |
158 | ||
159 | switch (dm_pp_clk_type) { | |
160 | case DM_PP_CLOCK_TYPE_DISPLAY_CLK: | |
161 | amd_pp_clk_type = amd_pp_disp_clock; | |
162 | break; | |
163 | case DM_PP_CLOCK_TYPE_ENGINE_CLK: | |
164 | amd_pp_clk_type = amd_pp_sys_clock; | |
165 | break; | |
166 | case DM_PP_CLOCK_TYPE_MEMORY_CLK: | |
167 | amd_pp_clk_type = amd_pp_mem_clock; | |
168 | break; | |
169 | case DM_PP_CLOCK_TYPE_DCEFCLK: | |
170 | amd_pp_clk_type = amd_pp_dcef_clock; | |
171 | break; | |
172 | case DM_PP_CLOCK_TYPE_DCFCLK: | |
173 | amd_pp_clk_type = amd_pp_dcf_clock; | |
174 | break; | |
175 | case DM_PP_CLOCK_TYPE_PIXELCLK: | |
176 | amd_pp_clk_type = amd_pp_pixel_clock; | |
177 | break; | |
178 | case DM_PP_CLOCK_TYPE_FCLK: | |
179 | amd_pp_clk_type = amd_pp_f_clock; | |
180 | break; | |
181 | case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK: | |
66917e56 | 182 | amd_pp_clk_type = amd_pp_phy_clock; |
183 | break; | |
184 | case DM_PP_CLOCK_TYPE_DPPCLK: | |
f7c1ed34 ML |
185 | amd_pp_clk_type = amd_pp_dpp_clock; |
186 | break; | |
187 | default: | |
188 | DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n", | |
189 | dm_pp_clk_type); | |
190 | break; | |
191 | } | |
192 | ||
193 | return amd_pp_clk_type; | |
194 | } | |
195 | ||
196 | static void pp_to_dc_clock_levels( | |
197 | const struct amd_pp_clocks *pp_clks, | |
198 | struct dm_pp_clock_levels *dc_clks, | |
199 | enum dm_pp_clock_type dc_clk_type) | |
200 | { | |
201 | uint32_t i; | |
202 | ||
203 | if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) { | |
204 | DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n", | |
205 | DC_DECODE_PP_CLOCK_TYPE(dc_clk_type), | |
206 | pp_clks->count, | |
207 | DM_PP_MAX_CLOCK_LEVELS); | |
208 | ||
209 | dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS; | |
210 | } else | |
211 | dc_clks->num_levels = pp_clks->count; | |
212 | ||
213 | DRM_INFO("DM_PPLIB: values for %s clock\n", | |
214 | DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); | |
215 | ||
216 | for (i = 0; i < dc_clks->num_levels; i++) { | |
217 | DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]); | |
23ec3d14 | 218 | dc_clks->clocks_in_khz[i] = pp_clks->clock[i]; |
f7c1ed34 ML |
219 | } |
220 | } | |
221 | ||
222 | static void pp_to_dc_clock_levels_with_latency( | |
223 | const struct pp_clock_levels_with_latency *pp_clks, | |
224 | struct dm_pp_clock_levels_with_latency *clk_level_info, | |
225 | enum dm_pp_clock_type dc_clk_type) | |
226 | { | |
227 | uint32_t i; | |
228 | ||
229 | if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) { | |
230 | DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n", | |
231 | DC_DECODE_PP_CLOCK_TYPE(dc_clk_type), | |
232 | pp_clks->num_levels, | |
233 | DM_PP_MAX_CLOCK_LEVELS); | |
234 | ||
235 | clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS; | |
236 | } else | |
237 | clk_level_info->num_levels = pp_clks->num_levels; | |
238 | ||
239 | DRM_DEBUG("DM_PPLIB: values for %s clock\n", | |
240 | DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); | |
241 | ||
242 | for (i = 0; i < clk_level_info->num_levels; i++) { | |
23ec3d14 RZ |
243 | DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz); |
244 | clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz; | |
f7c1ed34 ML |
245 | clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us; |
246 | } | |
247 | } | |
248 | ||
249 | static void pp_to_dc_clock_levels_with_voltage( | |
250 | const struct pp_clock_levels_with_voltage *pp_clks, | |
251 | struct dm_pp_clock_levels_with_voltage *clk_level_info, | |
252 | enum dm_pp_clock_type dc_clk_type) | |
253 | { | |
254 | uint32_t i; | |
255 | ||
256 | if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) { | |
257 | DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n", | |
258 | DC_DECODE_PP_CLOCK_TYPE(dc_clk_type), | |
259 | pp_clks->num_levels, | |
260 | DM_PP_MAX_CLOCK_LEVELS); | |
261 | ||
262 | clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS; | |
263 | } else | |
264 | clk_level_info->num_levels = pp_clks->num_levels; | |
265 | ||
266 | DRM_INFO("DM_PPLIB: values for %s clock\n", | |
267 | DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); | |
268 | ||
269 | for (i = 0; i < clk_level_info->num_levels; i++) { | |
23ec3d14 RZ |
270 | DRM_INFO("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz); |
271 | clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz; | |
f7c1ed34 ML |
272 | clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv; |
273 | } | |
274 | } | |
275 | ||
276 | bool dm_pp_get_clock_levels_by_type( | |
277 | const struct dc_context *ctx, | |
278 | enum dm_pp_clock_type clk_type, | |
279 | struct dm_pp_clock_levels *dc_clks) | |
280 | { | |
281 | struct amdgpu_device *adev = ctx->driver_context; | |
282 | void *pp_handle = adev->powerplay.pp_handle; | |
283 | struct amd_pp_clocks pp_clks = { 0 }; | |
284 | struct amd_pp_simple_clock_info validation_clks = { 0 }; | |
285 | uint32_t i; | |
286 | ||
287 | if (adev->powerplay.pp_funcs->get_clock_by_type) { | |
288 | if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle, | |
289 | dc_to_pp_clock_type(clk_type), &pp_clks)) { | |
290 | /* Error in pplib. Provide default values. */ | |
291 | get_default_clock_levels(clk_type, dc_clks); | |
292 | return true; | |
293 | } | |
294 | } | |
295 | ||
296 | pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type); | |
297 | ||
298 | if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) { | |
299 | if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks( | |
300 | pp_handle, &validation_clks)) { | |
301 | /* Error in pplib. Provide default values. */ | |
302 | DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n"); | |
303 | validation_clks.engine_max_clock = 72000; | |
304 | validation_clks.memory_max_clock = 80000; | |
305 | validation_clks.level = 0; | |
306 | } | |
307 | } | |
308 | ||
309 | DRM_INFO("DM_PPLIB: Validation clocks:\n"); | |
310 | DRM_INFO("DM_PPLIB: engine_max_clock: %d\n", | |
311 | validation_clks.engine_max_clock); | |
312 | DRM_INFO("DM_PPLIB: memory_max_clock: %d\n", | |
313 | validation_clks.memory_max_clock); | |
314 | DRM_INFO("DM_PPLIB: level : %d\n", | |
315 | validation_clks.level); | |
316 | ||
317 | /* Translate 10 kHz to kHz. */ | |
318 | validation_clks.engine_max_clock *= 10; | |
319 | validation_clks.memory_max_clock *= 10; | |
320 | ||
321 | /* Determine the highest non-boosted level from the Validation Clocks */ | |
322 | if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) { | |
323 | for (i = 0; i < dc_clks->num_levels; i++) { | |
324 | if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) { | |
325 | /* This clock is higher the validation clock. | |
326 | * Than means the previous one is the highest | |
327 | * non-boosted one. */ | |
328 | DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n", | |
329 | dc_clks->num_levels, i); | |
330 | dc_clks->num_levels = i > 0 ? i : 1; | |
331 | break; | |
332 | } | |
333 | } | |
334 | } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) { | |
335 | for (i = 0; i < dc_clks->num_levels; i++) { | |
336 | if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) { | |
337 | DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n", | |
338 | dc_clks->num_levels, i); | |
339 | dc_clks->num_levels = i > 0 ? i : 1; | |
340 | break; | |
341 | } | |
342 | } | |
343 | } | |
344 | ||
345 | return true; | |
346 | } | |
347 | ||
348 | bool dm_pp_get_clock_levels_by_type_with_latency( | |
349 | const struct dc_context *ctx, | |
350 | enum dm_pp_clock_type clk_type, | |
351 | struct dm_pp_clock_levels_with_latency *clk_level_info) | |
352 | { | |
353 | struct amdgpu_device *adev = ctx->driver_context; | |
354 | void *pp_handle = adev->powerplay.pp_handle; | |
355 | struct pp_clock_levels_with_latency pp_clks = { 0 }; | |
356 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; | |
357 | ||
358 | if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency) | |
359 | return false; | |
360 | ||
361 | if (pp_funcs->get_clock_by_type_with_latency(pp_handle, | |
362 | dc_to_pp_clock_type(clk_type), | |
363 | &pp_clks)) | |
364 | return false; | |
365 | ||
366 | pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type); | |
367 | ||
368 | return true; | |
369 | } | |
370 | ||
371 | bool dm_pp_get_clock_levels_by_type_with_voltage( | |
372 | const struct dc_context *ctx, | |
373 | enum dm_pp_clock_type clk_type, | |
374 | struct dm_pp_clock_levels_with_voltage *clk_level_info) | |
375 | { | |
376 | struct amdgpu_device *adev = ctx->driver_context; | |
377 | void *pp_handle = adev->powerplay.pp_handle; | |
378 | struct pp_clock_levels_with_voltage pp_clk_info = {0}; | |
379 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; | |
380 | ||
381 | if (pp_funcs->get_clock_by_type_with_voltage(pp_handle, | |
382 | dc_to_pp_clock_type(clk_type), | |
383 | &pp_clk_info)) | |
384 | return false; | |
385 | ||
386 | pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type); | |
387 | ||
388 | return true; | |
389 | } | |
390 | ||
/*
 * Notify powerplay of new watermark clock ranges.
 * Stub: always returns false ("not supported") until implemented.
 */
bool dm_pp_notify_wm_clock_changes(
	const struct dc_context *ctx,
	struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
{
	/* TODO: to be implemented */
	return false;
}
398 | ||
/*
 * Apply a DC power-level change request via powerplay.
 * Stub: always returns false ("not supported") until implemented.
 */
bool dm_pp_apply_power_level_change_request(
	const struct dc_context *ctx,
	struct dm_pp_power_level_change_request *level_change_req)
{
	/* TODO: to be implemented */
	return false;
}
406 | ||
407 | bool dm_pp_apply_clock_for_voltage_request( | |
408 | const struct dc_context *ctx, | |
409 | struct dm_pp_clock_for_voltage_req *clock_for_voltage_req) | |
410 | { | |
411 | struct amdgpu_device *adev = ctx->driver_context; | |
412 | struct pp_display_clock_request pp_clock_request = {0}; | |
413 | int ret = 0; | |
414 | ||
415 | pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type); | |
416 | pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz; | |
417 | ||
418 | if (!pp_clock_request.clock_type) | |
419 | return false; | |
420 | ||
421 | if (adev->powerplay.pp_funcs->display_clock_voltage_request) | |
422 | ret = adev->powerplay.pp_funcs->display_clock_voltage_request( | |
423 | adev->powerplay.pp_handle, | |
424 | &pp_clock_request); | |
425 | if (ret) | |
426 | return false; | |
427 | return true; | |
428 | } | |
429 | ||
430 | bool dm_pp_get_static_clocks( | |
431 | const struct dc_context *ctx, | |
432 | struct dm_pp_static_clock_info *static_clk_info) | |
433 | { | |
434 | struct amdgpu_device *adev = ctx->driver_context; | |
435 | struct amd_pp_clock_info pp_clk_info = {0}; | |
436 | int ret = 0; | |
437 | ||
438 | if (adev->powerplay.pp_funcs->get_current_clocks) | |
439 | ret = adev->powerplay.pp_funcs->get_current_clocks( | |
440 | adev->powerplay.pp_handle, | |
441 | &pp_clk_info); | |
442 | if (ret) | |
443 | return false; | |
444 | ||
445 | static_clk_info->max_clocks_state = pp_clk_info.max_clocks_state; | |
23ec3d14 RZ |
446 | static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock; |
447 | static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock; | |
f7c1ed34 ML |
448 | |
449 | return true; | |
450 | } | |
451 | ||
452 | void pp_rv_set_display_requirement(struct pp_smu *pp, | |
453 | struct pp_smu_display_requirement_rv *req) | |
454 | { | |
455 | struct amdgpu_device *adev = pp->ctx->driver_context; | |
456 | struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; | |
457 | int ret = 0; | |
458 | if (hwmgr->hwmgr_func->set_deep_sleep_dcefclk) | |
459 | ret = hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, req->hard_min_dcefclk_khz/10); | |
460 | if (hwmgr->hwmgr_func->set_active_display_count) | |
461 | ret = hwmgr->hwmgr_func->set_active_display_count(hwmgr, req->display_count); | |
462 | ||
463 | //store_cc6 is not yet implemented in SMU level | |
464 | } | |
465 | ||
466 | void pp_rv_set_wm_ranges(struct pp_smu *pp, | |
467 | struct pp_smu_wm_range_sets *ranges) | |
468 | { | |
469 | struct amdgpu_device *adev = pp->ctx->driver_context; | |
470 | struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; | |
471 | struct pp_wm_sets_with_clock_ranges_soc15 ranges_soc15 = {0}; | |
472 | int i = 0; | |
473 | ||
474 | if (!hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges || | |
475 | !pp || !ranges) | |
476 | return; | |
477 | ||
478 | //not entirely sure if thats a correct assignment | |
479 | ranges_soc15.num_wm_sets_dmif = ranges->num_reader_wm_sets; | |
480 | ranges_soc15.num_wm_sets_mcif = ranges->num_writer_wm_sets; | |
481 | ||
482 | for (i = 0; i < ranges_soc15.num_wm_sets_dmif; i++) { | |
483 | if (ranges->reader_wm_sets[i].wm_inst > 3) | |
484 | ranges_soc15.wm_sets_dmif[i].wm_set_id = DC_WM_SET_A; | |
485 | else | |
486 | ranges_soc15.wm_sets_dmif[i].wm_set_id = | |
487 | ranges->reader_wm_sets[i].wm_inst; | |
488 | ranges_soc15.wm_sets_dmif[i].wm_max_dcefclk_in_khz = | |
489 | ranges->reader_wm_sets[i].max_drain_clk_khz; | |
490 | ranges_soc15.wm_sets_dmif[i].wm_min_dcefclk_in_khz = | |
491 | ranges->reader_wm_sets[i].min_drain_clk_khz; | |
492 | ranges_soc15.wm_sets_dmif[i].wm_max_memclk_in_khz = | |
493 | ranges->reader_wm_sets[i].max_fill_clk_khz; | |
494 | ranges_soc15.wm_sets_dmif[i].wm_min_memclk_in_khz = | |
495 | ranges->reader_wm_sets[i].min_fill_clk_khz; | |
496 | } | |
497 | ||
498 | for (i = 0; i < ranges_soc15.num_wm_sets_mcif; i++) { | |
499 | if (ranges->writer_wm_sets[i].wm_inst > 3) | |
500 | ranges_soc15.wm_sets_dmif[i].wm_set_id = DC_WM_SET_A; | |
501 | else | |
502 | ranges_soc15.wm_sets_mcif[i].wm_set_id = | |
503 | ranges->writer_wm_sets[i].wm_inst; | |
504 | ranges_soc15.wm_sets_mcif[i].wm_max_socclk_in_khz = | |
505 | ranges->writer_wm_sets[i].max_fill_clk_khz; | |
506 | ranges_soc15.wm_sets_mcif[i].wm_min_socclk_in_khz = | |
507 | ranges->writer_wm_sets[i].min_fill_clk_khz; | |
508 | ranges_soc15.wm_sets_mcif[i].wm_max_memclk_in_khz = | |
509 | ranges->writer_wm_sets[i].max_fill_clk_khz; | |
510 | ranges_soc15.wm_sets_mcif[i].wm_min_memclk_in_khz = | |
511 | ranges->writer_wm_sets[i].min_fill_clk_khz; | |
512 | } | |
513 | ||
514 | hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr, &ranges_soc15); | |
515 | ||
516 | } | |
517 | ||
/*
 * Raven pp_smu hook: forward DC's "enable PME workaround" request to
 * the SMU via the hwmgr's smus_notify_pwe callback, when implemented.
 */
void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
{
	struct amdgpu_device *adev = pp->ctx->driver_context;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	/* Optional hook -- not all hwmgr implementations provide it. */
	if (hwmgr->hwmgr_func->smus_notify_pwe)
		hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
}
526 | ||
/*
 * Populate the Raven (RV) pp_smu function table consumed by DC with
 * the powerplay-backed implementations defined in this file.
 */
void dm_pp_get_funcs_rv(
	struct dc_context *ctx,
	struct pp_smu_funcs_rv *funcs)
{
	/* ctx is stored so the callbacks can recover the amdgpu_device. */
	funcs->pp_smu.ctx = ctx;
	funcs->set_display_requirement = pp_rv_set_display_requirement;
	funcs->set_wm_ranges = pp_rv_set_wm_ranges;
	funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable;
}