drivers/gpu/drm/amd/amdgpu/ci_dpm.c
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include "drmP.h"
26#include "amdgpu.h"
27#include "amdgpu_pm.h"
28#include "amdgpu_ucode.h"
29#include "cikd.h"
30#include "amdgpu_dpm.h"
31#include "ci_dpm.h"
32#include "gfx_v7_0.h"
33#include "atom.h"
34#include <linux/seq_file.h>
35
36#include "smu/smu_7_0_1_d.h"
37#include "smu/smu_7_0_1_sh_mask.h"
38
39#include "dce/dce_8_0_d.h"
40#include "dce/dce_8_0_sh_mask.h"
41
42#include "bif/bif_4_1_d.h"
43#include "bif/bif_4_1_sh_mask.h"
44
45#include "gca/gfx_7_2_d.h"
46#include "gca/gfx_7_2_sh_mask.h"
47
48#include "gmc/gmc_7_1_d.h"
49#include "gmc/gmc_7_1_sh_mask.h"
50
51MODULE_FIRMWARE("radeon/bonaire_smc.bin");
52MODULE_FIRMWARE("radeon/hawaii_smc.bin");
53
54#define MC_CG_ARB_FREQ_F0 0x0a
55#define MC_CG_ARB_FREQ_F1 0x0b
56#define MC_CG_ARB_FREQ_F2 0x0c
57#define MC_CG_ARB_FREQ_F3 0x0d
58
59#define SMC_RAM_END 0x40000
60
61#define VOLTAGE_SCALE 4
62#define VOLTAGE_VID_OFFSET_SCALE1 625
63#define VOLTAGE_VID_OFFSET_SCALE2 100
64
65static const struct ci_pt_defaults defaults_hawaii_xt =
66{
67 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
68 { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
69 { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
70};
71
72static const struct ci_pt_defaults defaults_hawaii_pro =
73{
74 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
75 { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
76 { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
77};
78
79static const struct ci_pt_defaults defaults_bonaire_xt =
80{
81 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
82 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
83 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
84};
85
86static const struct ci_pt_defaults defaults_bonaire_pro =
87{
88 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
89 { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
90 { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
91};
92
93static const struct ci_pt_defaults defaults_saturn_xt =
94{
95 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
96 { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
97 { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
98};
99
100static const struct ci_pt_defaults defaults_saturn_pro =
101{
102 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
103 { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
104 { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
105};
106
107static const struct ci_pt_config_reg didt_config_ci[] =
108{
109 { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
110 { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
111 { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
112 { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
113 { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
114 { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
115 { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
116 { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
117 { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
118 { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
119 { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
120 { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
121 { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
122 { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
123 { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
124 { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
125 { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
126 { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
127 { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
128 { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
129 { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
130 { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
131 { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
132 { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
133 { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
134 { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
135 { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
136 { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
137 { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
138 { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
139 { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
140 { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
141 { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
142 { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
143 { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
144 { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
145 { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
146 { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
147 { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
148 { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
149 { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
150 { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
151 { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
152 { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
153 { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
154 { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
155 { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
156 { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
157 { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
158 { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
159 { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
160 { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
161 { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
162 { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
163 { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
164 { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
165 { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
166 { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
167 { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
168 { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
169 { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
170 { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
171 { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
172 { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
173 { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
174 { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
175 { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
176 { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
177 { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
178 { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
179 { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
180 { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
181 { 0xFFFFFFFF }
182};
183
184static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
185{
186 return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
187}
188
189#define MC_CG_ARB_FREQ_F0 0x0a
190#define MC_CG_ARB_FREQ_F1 0x0b
191#define MC_CG_ARB_FREQ_F2 0x0c
192#define MC_CG_ARB_FREQ_F3 0x0d
193
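/* Copy the MC arbiter DRAM timing registers and burst time from the
 * arb_freq_src set into the arb_freq_dest set, then make the destination
 * set the active one via MC_ARB_CG.
 */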
194static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
195 u32 arb_freq_src, u32 arb_freq_dest)
196{
197 u32 mc_arb_dram_timing;
198 u32 mc_arb_dram_timing2;
199 u32 burst_time;
200 u32 mc_cg_config;
201
202 switch (arb_freq_src) {
203 case MC_CG_ARB_FREQ_F0:
204 mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING);
205 mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
206 burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
207 MC_ARB_BURST_TIME__STATE0__SHIFT;
208 break;
209 case MC_CG_ARB_FREQ_F1:
210 mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING_1);
211 mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
212 burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
213 MC_ARB_BURST_TIME__STATE1__SHIFT;
214 break;
215 default:
216 return -EINVAL;
217 }
218
219 switch (arb_freq_dest) {
220 case MC_CG_ARB_FREQ_F0:
221 WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
222 WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
223 WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
224 ~MC_ARB_BURST_TIME__STATE0_MASK);
225 break;
226 case MC_CG_ARB_FREQ_F1:
227 WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
228 WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
229 WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
230 ~MC_ARB_BURST_TIME__STATE1_MASK);
231 break;
232 default:
233 return -EINVAL;
234 }
235
236 mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
237 WREG32(mmMC_CG_CONFIG, mc_cg_config);
238 WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
239 ~MC_ARB_CG__CG_ARB_REQ_MASK);
240
241 return 0;
242}
243
244static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
245{
246 u8 mc_para_index;
247
248 if (memory_clock < 10000)
249 mc_para_index = 0;
250 else if (memory_clock >= 80000)
251 mc_para_index = 0x0f;
252 else
253 mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
254 return mc_para_index;
255}
256
257static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
258{
259 u8 mc_para_index;
260
261 if (strobe_mode) {
262 if (memory_clock < 12500)
263 mc_para_index = 0x00;
264 else if (memory_clock > 47500)
265 mc_para_index = 0x0f;
266 else
267 mc_para_index = (u8)((memory_clock - 10000) / 2500);
268 } else {
269 if (memory_clock < 65000)
270 mc_para_index = 0x00;
271 else if (memory_clock > 135000)
272 mc_para_index = 0x0f;
273 else
274 mc_para_index = (u8)((memory_clock - 60000) / 5000);
275 }
276 return mc_para_index;
277}
278
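/* If the voltage table has more entries than the state table can hold,
 * drop the lowest entries so only the highest max_voltage_steps remain.
 */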
279static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
280 u32 max_voltage_steps,
281 struct atom_voltage_table *voltage_table)
282{
283 unsigned int i, diff;
284
285 if (voltage_table->count <= max_voltage_steps)
286 return;
287
288 diff = voltage_table->count - max_voltage_steps;
289
290 for (i = 0; i < max_voltage_steps; i++)
291 voltage_table->entries[i] = voltage_table->entries[i + diff];
292
293 voltage_table->count = max_voltage_steps;
294}
295
296static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
297 struct atom_voltage_table_entry *voltage_table,
298 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
299static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
300static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
301 u32 target_tdp);
302static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
303static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
304static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);
305
306static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
307 PPSMC_Msg msg, u32 parameter);
308static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
309static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
310
311static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
312{
313 struct ci_power_info *pi = adev->pm.dpm.priv;
314
315 return pi;
316}
317
318static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
319{
320 struct ci_ps *ps = rps->ps_priv;
321
322 return ps;
323}
324
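/* Select the powertune defaults for the detected Bonaire/Saturn/Hawaii
 * variant by PCI device ID and initialize the power containment capabilities.
 */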
325static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
326{
327 struct ci_power_info *pi = ci_get_pi(adev);
328
329 switch (adev->pdev->device) {
330 case 0x6649:
331 case 0x6650:
332 case 0x6651:
333 case 0x6658:
334 case 0x665C:
335 case 0x665D:
336 default:
337 pi->powertune_defaults = &defaults_bonaire_xt;
338 break;
339 case 0x6640:
340 case 0x6641:
341 case 0x6646:
342 case 0x6647:
343 pi->powertune_defaults = &defaults_saturn_xt;
344 break;
345 case 0x67B8:
346 case 0x67B0:
347 pi->powertune_defaults = &defaults_hawaii_xt;
348 break;
349 case 0x67BA:
350 case 0x67B1:
351 pi->powertune_defaults = &defaults_hawaii_pro;
352 break;
353 case 0x67A0:
354 case 0x67A1:
355 case 0x67A2:
356 case 0x67A8:
357 case 0x67A9:
358 case 0x67AA:
359 case 0x67B9:
360 case 0x67BE:
361 pi->powertune_defaults = &defaults_bonaire_xt;
362 break;
363 }
364
365 pi->dte_tj_offset = 0;
366
367 pi->caps_power_containment = true;
368 pi->caps_cac = false;
369 pi->caps_sq_ramping = false;
370 pi->caps_db_ramping = false;
371 pi->caps_td_ramping = false;
372 pi->caps_tcp_ramping = false;
373
374 if (pi->caps_power_containment) {
375 pi->caps_cac = true;
376 if (adev->asic_type == CHIP_HAWAII)
377 pi->enable_bapm_feature = false;
378 else
379 pi->enable_bapm_feature = true;
380 pi->enable_tdc_limit_feature = true;
381 pi->enable_pkg_pwr_tracking_feature = true;
382 }
383}
384
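/* Convert a VDDC value to the SMC VID encoding: (6200 - vddc * VOLTAGE_SCALE) / 25 */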
385static u8 ci_convert_to_vid(u16 vddc)
386{
387 return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
388}
389
390static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
391{
392 struct ci_power_info *pi = ci_get_pi(adev);
393 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
394 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
395 u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
396 u32 i;
397
398 if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
399 return -EINVAL;
400 if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
401 return -EINVAL;
402 if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
403 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
404 return -EINVAL;
405
406 for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
407 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
408 lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
409 hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
410 hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
411 } else {
412 lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
413 hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
414 }
415 }
416 return 0;
417}
418
419static int ci_populate_vddc_vid(struct amdgpu_device *adev)
420{
421 struct ci_power_info *pi = ci_get_pi(adev);
422 u8 *vid = pi->smc_powertune_table.VddCVid;
423 u32 i;
424
425 if (pi->vddc_voltage_table.count > 8)
426 return -EINVAL;
427
428 for (i = 0; i < pi->vddc_voltage_table.count; i++)
429 vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
430
431 return 0;
432}
433
434static int ci_populate_svi_load_line(struct amdgpu_device *adev)
435{
436 struct ci_power_info *pi = ci_get_pi(adev);
437 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
438
439 pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
440 pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
441 pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
442 pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
443
444 return 0;
445}
446
447static int ci_populate_tdc_limit(struct amdgpu_device *adev)
448{
449 struct ci_power_info *pi = ci_get_pi(adev);
450 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
451 u16 tdc_limit;
452
453 tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
454 pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
455 pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
456 pt_defaults->tdc_vddc_throttle_release_limit_perc;
457 pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
458
459 return 0;
460}
461
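/* Read the TdcWaterfallCtl fuse location from SMC SRAM; on success the
 * value is replaced with the default from the powertune defaults table.
 */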
462static int ci_populate_dw8(struct amdgpu_device *adev)
463{
464 struct ci_power_info *pi = ci_get_pi(adev);
465 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
466 int ret;
467
468 ret = amdgpu_ci_read_smc_sram_dword(adev,
469 SMU7_FIRMWARE_HEADER_LOCATION +
470 offsetof(SMU7_Firmware_Header, PmFuseTable) +
471 offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
472 (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
473 pi->sram_end);
474 if (ret)
475 return -EINVAL;
476 else
477 pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
478
479 return 0;
480}
481
482static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
483{
484 struct ci_power_info *pi = ci_get_pi(adev);
485
486 if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
487 (adev->pm.dpm.fan.fan_output_sensitivity == 0))
488 adev->pm.dpm.fan.fan_output_sensitivity =
489 adev->pm.dpm.fan.default_fan_output_sensitivity;
490
491 pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
492 cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);
493
494 return 0;
495}
496
497static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
498{
499 struct ci_power_info *pi = ci_get_pi(adev);
500 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
501 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
502 int i, min, max;
503
504 min = max = hi_vid[0];
505 for (i = 0; i < 8; i++) {
506 if (0 != hi_vid[i]) {
507 if (min > hi_vid[i])
508 min = hi_vid[i];
509 if (max < hi_vid[i])
510 max = hi_vid[i];
511 }
512
513 if (0 != lo_vid[i]) {
514 if (min > lo_vid[i])
515 min = lo_vid[i];
516 if (max < lo_vid[i])
517 max = lo_vid[i];
518 }
519 }
520
521 if ((min == 0) || (max == 0))
522 return -EINVAL;
523 pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
524 pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
525
526 return 0;
527}
528
529static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
530{
531 struct ci_power_info *pi = ci_get_pi(adev);
532 u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
533 u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
534 struct amdgpu_cac_tdp_table *cac_tdp_table =
535 adev->pm.dpm.dyn_state.cac_tdp_table;
536
537 hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
538 lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
539
540 pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
541 pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
542
543 return 0;
544}
545
546static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
547{
548 struct ci_power_info *pi = ci_get_pi(adev);
549 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
550 SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
551 struct amdgpu_cac_tdp_table *cac_tdp_table =
552 adev->pm.dpm.dyn_state.cac_tdp_table;
553 struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
554 int i, j, k;
555 const u16 *def1;
556 const u16 *def2;
557
558 dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
559 dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
560
561 dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
562 dpm_table->GpuTjMax =
563 (u8)(pi->thermal_temp_setting.temperature_high / 1000);
564 dpm_table->GpuTjHyst = 8;
565
566 dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
567
568 if (ppm) {
569 dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
570 dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
571 } else {
572 dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
573 dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
574 }
575
576 dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
577 def1 = pt_defaults->bapmti_r;
578 def2 = pt_defaults->bapmti_rc;
579
580 for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
581 for (j = 0; j < SMU7_DTE_SOURCES; j++) {
582 for (k = 0; k < SMU7_DTE_SINKS; k++) {
583 dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
584 dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
585 def1++;
586 def2++;
587 }
588 }
589 }
590
591 return 0;
592}
593
594static int ci_populate_pm_base(struct amdgpu_device *adev)
595{
596 struct ci_power_info *pi = ci_get_pi(adev);
597 u32 pm_fuse_table_offset;
598 int ret;
599
600 if (pi->caps_power_containment) {
601 ret = amdgpu_ci_read_smc_sram_dword(adev,
602 SMU7_FIRMWARE_HEADER_LOCATION +
603 offsetof(SMU7_Firmware_Header, PmFuseTable),
604 &pm_fuse_table_offset, pi->sram_end);
605 if (ret)
606 return ret;
607 ret = ci_populate_bapm_vddc_vid_sidd(adev);
608 if (ret)
609 return ret;
610 ret = ci_populate_vddc_vid(adev);
611 if (ret)
612 return ret;
613 ret = ci_populate_svi_load_line(adev);
614 if (ret)
615 return ret;
616 ret = ci_populate_tdc_limit(adev);
617 if (ret)
618 return ret;
619 ret = ci_populate_dw8(adev);
620 if (ret)
621 return ret;
622 ret = ci_populate_fuzzy_fan(adev);
623 if (ret)
624 return ret;
625 ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
626 if (ret)
627 return ret;
628 ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
629 if (ret)
630 return ret;
631 ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
632 (u8 *)&pi->smc_powertune_table,
633 sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
634 if (ret)
635 return ret;
636 }
637
638 return 0;
639}
640
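/* Set or clear DIDT_CTRL_EN for each DIDT block (SQ, DB, TD, TCP) whose
 * ramping feature is enabled in the power info.
 */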
641static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
642{
643 struct ci_power_info *pi = ci_get_pi(adev);
644 u32 data;
645
646 if (pi->caps_sq_ramping) {
647 data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
648 if (enable)
649 data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
650 else
651 data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
652 WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
653 }
654
655 if (pi->caps_db_ramping) {
656 data = RREG32_DIDT(ixDIDT_DB_CTRL0);
657 if (enable)
658 data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
659 else
660 data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
661 WREG32_DIDT(ixDIDT_DB_CTRL0, data);
662 }
663
664 if (pi->caps_td_ramping) {
665 data = RREG32_DIDT(ixDIDT_TD_CTRL0);
666 if (enable)
667 data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
668 else
669 data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
670 WREG32_DIDT(ixDIDT_TD_CTRL0, data);
671 }
672
673 if (pi->caps_tcp_ramping) {
674 data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
675 if (enable)
676 data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
677 else
678 data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
679 WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
680 }
681}
682
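/* Walk a ci_pt_config_reg table terminated by an offset of 0xFFFFFFFF:
 * CACHE entries accumulate field values, all other entries perform a
 * read-modify-write through the SMC, DIDT or MMIO register space.
 */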
683static int ci_program_pt_config_registers(struct amdgpu_device *adev,
684 const struct ci_pt_config_reg *cac_config_regs)
685{
686 const struct ci_pt_config_reg *config_regs = cac_config_regs;
687 u32 data;
688 u32 cache = 0;
689
690 if (config_regs == NULL)
691 return -EINVAL;
692
693 while (config_regs->offset != 0xFFFFFFFF) {
694 if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
695 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
696 } else {
697 switch (config_regs->type) {
698 case CISLANDS_CONFIGREG_SMC_IND:
699 data = RREG32_SMC(config_regs->offset);
700 break;
701 case CISLANDS_CONFIGREG_DIDT_IND:
702 data = RREG32_DIDT(config_regs->offset);
703 break;
704 default:
705 data = RREG32(config_regs->offset);
706 break;
707 }
708
709 data &= ~config_regs->mask;
710 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
711 data |= cache;
712
713 switch (config_regs->type) {
714 case CISLANDS_CONFIGREG_SMC_IND:
715 WREG32_SMC(config_regs->offset, data);
716 break;
717 case CISLANDS_CONFIGREG_DIDT_IND:
718 WREG32_DIDT(config_regs->offset, data);
719 break;
720 default:
721 WREG32(config_regs->offset, data);
722 break;
723 }
724 cache = 0;
725 }
726 config_regs++;
727 }
728 return 0;
729}
730
731static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
732{
733 struct ci_power_info *pi = ci_get_pi(adev);
734 int ret;
735
736 if (pi->caps_sq_ramping || pi->caps_db_ramping ||
737 pi->caps_td_ramping || pi->caps_tcp_ramping) {
738 gfx_v7_0_enter_rlc_safe_mode(adev);
739
740 if (enable) {
741 ret = ci_program_pt_config_registers(adev, didt_config_ci);
742 if (ret) {
743 gfx_v7_0_exit_rlc_safe_mode(adev);
744 return ret;
745 }
746 }
747
748 ci_do_enable_didt(adev, enable);
749
750 gfx_v7_0_exit_rlc_safe_mode(adev);
751 }
752
753 return 0;
754}
755
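/* Enable or disable the DTE (BAPM), TDC limit and package power limit
 * features in the SMC, tracking which features were actually enabled.
 */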
756static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
757{
758 struct ci_power_info *pi = ci_get_pi(adev);
759 PPSMC_Result smc_result;
760 int ret = 0;
761
762 if (enable) {
763 pi->power_containment_features = 0;
764 if (pi->caps_power_containment) {
765 if (pi->enable_bapm_feature) {
766 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
767 if (smc_result != PPSMC_Result_OK)
768 ret = -EINVAL;
769 else
770 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
771 }
772
773 if (pi->enable_tdc_limit_feature) {
774 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
775 if (smc_result != PPSMC_Result_OK)
776 ret = -EINVAL;
777 else
778 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
779 }
780
781 if (pi->enable_pkg_pwr_tracking_feature) {
782 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
783 if (smc_result != PPSMC_Result_OK) {
784 ret = -EINVAL;
785 } else {
786 struct amdgpu_cac_tdp_table *cac_tdp_table =
787 adev->pm.dpm.dyn_state.cac_tdp_table;
788 u32 default_pwr_limit =
789 (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
790
791 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
792
793 ci_set_power_limit(adev, default_pwr_limit);
794 }
795 }
796 }
797 } else {
798 if (pi->caps_power_containment && pi->power_containment_features) {
799 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
800 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);
801
802 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
803 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
804
805 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
806 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
807 pi->power_containment_features = 0;
808 }
809 }
810
811 return ret;
812}
813
814static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
815{
816 struct ci_power_info *pi = ci_get_pi(adev);
817 PPSMC_Result smc_result;
818 int ret = 0;
819
820 if (pi->caps_cac) {
821 if (enable) {
822 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
823 if (smc_result != PPSMC_Result_OK) {
824 ret = -EINVAL;
825 pi->cac_enabled = false;
826 } else {
827 pi->cac_enabled = true;
828 }
829 } else if (pi->cac_enabled) {
830 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
831 pi->cac_enabled = false;
832 }
833 }
834
835 return ret;
836}
837
838static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
839 bool enable)
840{
841 struct ci_power_info *pi = ci_get_pi(adev);
842 PPSMC_Result smc_result = PPSMC_Result_OK;
843
844 if (pi->thermal_sclk_dpm_enabled) {
845 if (enable)
846 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
847 else
848 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
849 }
850
851 if (smc_result == PPSMC_Result_OK)
852 return 0;
853 else
854 return -EINVAL;
855}
856
857static int ci_power_control_set_level(struct amdgpu_device *adev)
858{
859 struct ci_power_info *pi = ci_get_pi(adev);
860 struct amdgpu_cac_tdp_table *cac_tdp_table =
861 adev->pm.dpm.dyn_state.cac_tdp_table;
862 s32 adjust_percent;
863 s32 target_tdp;
864 int ret = 0;
865 bool adjust_polarity = false; /* ??? */
866
867 if (pi->caps_power_containment) {
868 adjust_percent = adjust_polarity ?
869 adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
870 target_tdp = ((100 + adjust_percent) *
871 (s32)cac_tdp_table->configurable_tdp) / 100;
872
873 ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
874 }
875
876 return ret;
877}
878
879static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
880{
881 struct ci_power_info *pi = ci_get_pi(adev);
882
883 if (pi->uvd_power_gated == gate)
884 return;
885
886 pi->uvd_power_gated = gate;
887
888 ci_update_uvd_dpm(adev, gate);
889}
890
891static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
892{
893 u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
 894	u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
895
896 if (vblank_time < switch_limit)
897 return true;
898 else
899 return false;
900
901}
902
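/* Adjust the requested power state: pick VCE clocks when VCE is active,
 * clamp clocks to the DC limits on battery, and keep the memory clock at
 * the high level when MCLK switching must stay disabled.
 */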
903static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
904 struct amdgpu_ps *rps)
905{
906 struct ci_ps *ps = ci_get_ps(rps);
907 struct ci_power_info *pi = ci_get_pi(adev);
908 struct amdgpu_clock_and_voltage_limits *max_limits;
909 bool disable_mclk_switching;
910 u32 sclk, mclk;
911 int i;
912
913 if (rps->vce_active) {
914 rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
915 rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
916 } else {
917 rps->evclk = 0;
918 rps->ecclk = 0;
919 }
920
921 if ((adev->pm.dpm.new_active_crtc_count > 1) ||
922 ci_dpm_vblank_too_short(adev))
923 disable_mclk_switching = true;
924 else
925 disable_mclk_switching = false;
926
927 if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
928 pi->battery_state = true;
929 else
930 pi->battery_state = false;
931
932 if (adev->pm.dpm.ac_power)
933 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
934 else
935 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
936
937 if (adev->pm.dpm.ac_power == false) {
938 for (i = 0; i < ps->performance_level_count; i++) {
939 if (ps->performance_levels[i].mclk > max_limits->mclk)
940 ps->performance_levels[i].mclk = max_limits->mclk;
941 if (ps->performance_levels[i].sclk > max_limits->sclk)
942 ps->performance_levels[i].sclk = max_limits->sclk;
943 }
944 }
945
946 /* XXX validate the min clocks required for display */
947
948 if (disable_mclk_switching) {
949 mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
950 sclk = ps->performance_levels[0].sclk;
951 } else {
952 mclk = ps->performance_levels[0].mclk;
953 sclk = ps->performance_levels[0].sclk;
954 }
955
956 if (rps->vce_active) {
957 if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
958 sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
959 if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
960 mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
961 }
962
963 ps->performance_levels[0].sclk = sclk;
964 ps->performance_levels[0].mclk = mclk;
965
966 if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
967 ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;
968
969 if (disable_mclk_switching) {
970 if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
971 ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
972 } else {
973 if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
974 ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
975 }
976}
977
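/* Program the DIG_THERM_INTH/INTL thermal interrupt thresholds (in
 * degrees C) and record the accepted range in the dpm thermal state.
 */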
978static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
979 int min_temp, int max_temp)
980{
981 int low_temp = 0 * 1000;
982 int high_temp = 255 * 1000;
983 u32 tmp;
984
985 if (low_temp < min_temp)
986 low_temp = min_temp;
987 if (high_temp > max_temp)
988 high_temp = max_temp;
989 if (high_temp < low_temp) {
990 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
991 return -EINVAL;
992 }
993
994 tmp = RREG32_SMC(ixCG_THERMAL_INT);
995 tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
996 tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
 997		((low_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT);
998 WREG32_SMC(ixCG_THERMAL_INT, tmp);
999
1000#if 0
1001 /* XXX: need to figure out how to handle this properly */
1002 tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
1003 tmp &= DIG_THERM_DPM_MASK;
1004 tmp |= DIG_THERM_DPM(high_temp / 1000);
1005 WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
1006#endif
1007
1008 adev->pm.dpm.thermal.min_temp = low_temp;
1009 adev->pm.dpm.thermal.max_temp = high_temp;
1010 return 0;
1011}
1012
1013static int ci_thermal_enable_alert(struct amdgpu_device *adev,
1014 bool enable)
1015{
1016 u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
1017 PPSMC_Result result;
1018
1019 if (enable) {
1020 thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
1021 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
1022 WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
1023 result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
1024 if (result != PPSMC_Result_OK) {
1025 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
1026 return -EINVAL;
1027 }
1028 } else {
1029 thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
1030 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
1031 WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
1032 result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
1033 if (result != PPSMC_Result_OK) {
1034 DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
1035 return -EINVAL;
1036 }
1037 }
1038
1039 return 0;
1040}
1041
1042static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
1043{
1044 struct ci_power_info *pi = ci_get_pi(adev);
1045 u32 tmp;
1046
1047 if (pi->fan_ctrl_is_in_default_mode) {
1048 tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
1049 >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1050 pi->fan_ctrl_default_mode = tmp;
1051 tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
1052 >> CG_FDO_CTRL2__TMIN__SHIFT;
1053 pi->t_min = tmp;
1054 pi->fan_ctrl_is_in_default_mode = false;
1055 }
1056
1057 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
1058 tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
1059 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1060
1061 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1062 tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1063 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1064}
1065
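/* Build the SMU7_Discrete_FanTable from the fan profile (temperature and
 * PWM points, hysteresis, tach source) and upload it to SMC SRAM.
 */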
1066static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
1067{
1068 struct ci_power_info *pi = ci_get_pi(adev);
1069 SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
1070 u32 duty100;
1071 u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
1072 u16 fdo_min, slope1, slope2;
1073 u32 reference_clock, tmp;
1074 int ret;
1075 u64 tmp64;
1076
1077 if (!pi->fan_table_start) {
1078 adev->pm.dpm.fan.ucode_fan_control = false;
1079 return 0;
1080 }
1081
1082 duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1083 >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1084
1085 if (duty100 == 0) {
1086 adev->pm.dpm.fan.ucode_fan_control = false;
1087 return 0;
1088 }
1089
1090 tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
1091 do_div(tmp64, 10000);
1092 fdo_min = (u16)tmp64;
1093
1094 t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
1095 t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
1096
1097 pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
1098 pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
1099
1100 slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
1101 slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
1102
1103 fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
1104 fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
1105 fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
1106
1107 fan_table.Slope1 = cpu_to_be16(slope1);
1108 fan_table.Slope2 = cpu_to_be16(slope2);
1109
1110 fan_table.FdoMin = cpu_to_be16(fdo_min);
1111
1112 fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
1113
1114 fan_table.HystUp = cpu_to_be16(1);
1115
1116 fan_table.HystSlope = cpu_to_be16(1);
1117
1118 fan_table.TempRespLim = cpu_to_be16(5);
1119
1120 reference_clock = amdgpu_asic_get_xclk(adev);
1121
1122 fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
1123 reference_clock) / 1600);
1124
1125 fan_table.FdoMax = cpu_to_be16((u16)duty100);
1126
1127 tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
1128 >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
1129 fan_table.TempSrc = (uint8_t)tmp;
1130
1131 ret = amdgpu_ci_copy_bytes_to_smc(adev,
1132 pi->fan_table_start,
1133 (u8 *)(&fan_table),
1134 sizeof(fan_table),
1135 pi->sram_end);
1136
1137 if (ret) {
 1138		DRM_ERROR("Failed to load fan table to the SMC.\n");
1139 adev->pm.dpm.fan.ucode_fan_control = false;
1140 }
1141
1142 return 0;
1143}
1144
1145static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
1146{
1147 struct ci_power_info *pi = ci_get_pi(adev);
1148 PPSMC_Result ret;
1149
1150 if (pi->caps_od_fuzzy_fan_control_support) {
1151 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1152 PPSMC_StartFanControl,
1153 FAN_CONTROL_FUZZY);
1154 if (ret != PPSMC_Result_OK)
1155 return -EINVAL;
1156 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1157 PPSMC_MSG_SetFanPwmMax,
1158 adev->pm.dpm.fan.default_max_fan_pwm);
1159 if (ret != PPSMC_Result_OK)
1160 return -EINVAL;
1161 } else {
1162 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1163 PPSMC_StartFanControl,
1164 FAN_CONTROL_TABLE);
1165 if (ret != PPSMC_Result_OK)
1166 return -EINVAL;
1167 }
1168
1169 pi->fan_is_controlled_by_smc = true;
1170 return 0;
1171}
1172
1173
1174static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
1175{
1176 PPSMC_Result ret;
1177 struct ci_power_info *pi = ci_get_pi(adev);
1178
1179 ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
1180 if (ret == PPSMC_Result_OK) {
1181 pi->fan_is_controlled_by_smc = false;
1182 return 0;
1183 } else {
1184 return -EINVAL;
1185 }
1186}
1187
1188static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
1189 u32 *speed)
1190{
1191 u32 duty, duty100;
1192 u64 tmp64;
1193
1194 if (adev->pm.no_fan)
1195 return -ENOENT;
1196
1197 duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1198 >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1199 duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
1200 >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;
1201
1202 if (duty100 == 0)
1203 return -EINVAL;
1204
1205 tmp64 = (u64)duty * 100;
1206 do_div(tmp64, duty100);
1207 *speed = (u32)tmp64;
1208
1209 if (*speed > 100)
1210 *speed = 100;
1211
1212 return 0;
1213}
1214
1215static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
1216 u32 speed)
1217{
1218 u32 tmp;
1219 u32 duty, duty100;
1220 u64 tmp64;
1221 struct ci_power_info *pi = ci_get_pi(adev);
1222
1223 if (adev->pm.no_fan)
1224 return -ENOENT;
1225
1226 if (pi->fan_is_controlled_by_smc)
1227 return -EINVAL;
1228
1229 if (speed > 100)
1230 return -EINVAL;
1231
1232 duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1233 >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1234
1235 if (duty100 == 0)
1236 return -EINVAL;
1237
1238 tmp64 = (u64)speed * duty100;
1239 do_div(tmp64, 100);
1240 duty = (u32)tmp64;
1241
1242 tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
1243 tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
1244 WREG32_SMC(ixCG_FDO_CTRL0, tmp);
1245
1246 return 0;
1247}
1248
1249static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
1250{
1251 if (mode) {
1252 /* stop auto-manage */
1253 if (adev->pm.dpm.fan.ucode_fan_control)
1254 ci_fan_ctrl_stop_smc_fan_control(adev);
1255 ci_fan_ctrl_set_static_mode(adev, mode);
1256 } else {
1257 /* restart auto-manage */
1258 if (adev->pm.dpm.fan.ucode_fan_control)
1259 ci_thermal_start_smc_fan_control(adev);
1260 else
1261 ci_fan_ctrl_set_default_mode(adev);
1262 }
1263}
1264
1265static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
1266{
1267 struct ci_power_info *pi = ci_get_pi(adev);
1268 u32 tmp;
1269
1270 if (pi->fan_is_controlled_by_smc)
1271 return 0;
1272
1273 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1274 return (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
1275}
1276
1277#if 0
1278static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
1279 u32 *speed)
1280{
1281 u32 tach_period;
1282 u32 xclk = amdgpu_asic_get_xclk(adev);
1283
1284 if (adev->pm.no_fan)
1285 return -ENOENT;
1286
1287 if (adev->pm.fan_pulses_per_revolution == 0)
1288 return -ENOENT;
1289
1290 tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
1291 >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
1292 if (tach_period == 0)
1293 return -ENOENT;
1294
1295 *speed = 60 * xclk * 10000 / tach_period;
1296
1297 return 0;
1298}
1299
1300static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
1301 u32 speed)
1302{
1303 u32 tach_period, tmp;
1304 u32 xclk = amdgpu_asic_get_xclk(adev);
1305
1306 if (adev->pm.no_fan)
1307 return -ENOENT;
1308
1309 if (adev->pm.fan_pulses_per_revolution == 0)
1310 return -ENOENT;
1311
1312 if ((speed < adev->pm.fan_min_rpm) ||
1313 (speed > adev->pm.fan_max_rpm))
1314 return -EINVAL;
1315
1316 if (adev->pm.dpm.fan.ucode_fan_control)
1317 ci_fan_ctrl_stop_smc_fan_control(adev);
1318
1319 tach_period = 60 * xclk * 10000 / (8 * speed);
1320 tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
1321 tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
 1322	WREG32_SMC(ixCG_TACH_CTRL, tmp);
1323
1324 ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
1325
1326 return 0;
1327}
1328#endif
1329
1330static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
1331{
1332 struct ci_power_info *pi = ci_get_pi(adev);
1333 u32 tmp;
1334
1335 if (!pi->fan_ctrl_is_in_default_mode) {
1336 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1337 tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1338 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1339
1340 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
1341 tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
1342 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1343 pi->fan_ctrl_is_in_default_mode = true;
1344 }
1345}
1346
1347static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
1348{
1349 if (adev->pm.dpm.fan.ucode_fan_control) {
1350 ci_fan_ctrl_start_smc_fan_control(adev);
1351 ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
1352 }
1353}
1354
1355static void ci_thermal_initialize(struct amdgpu_device *adev)
1356{
1357 u32 tmp;
1358
1359 if (adev->pm.fan_pulses_per_revolution) {
1360 tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
1361 tmp |= (adev->pm.fan_pulses_per_revolution - 1)
1362 << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
1363 WREG32_SMC(ixCG_TACH_CTRL, tmp);
1364 }
1365
1366 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
1367 tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
1368 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1369}
1370
1371static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
1372{
1373 int ret;
1374
1375 ci_thermal_initialize(adev);
1376 ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
1377 if (ret)
1378 return ret;
1379 ret = ci_thermal_enable_alert(adev, true);
1380 if (ret)
1381 return ret;
1382 if (adev->pm.dpm.fan.ucode_fan_control) {
1383 ret = ci_thermal_setup_fan_table(adev);
1384 if (ret)
1385 return ret;
1386 ci_thermal_start_smc_fan_control(adev);
1387 }
1388
1389 return 0;
1390}
1391
1392static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
1393{
1394 if (!adev->pm.no_fan)
1395 ci_fan_ctrl_set_default_mode(adev);
1396}
1397
1398static int ci_read_smc_soft_register(struct amdgpu_device *adev,
1399 u16 reg_offset, u32 *value)
1400{
1401 struct ci_power_info *pi = ci_get_pi(adev);
1402
1403 return amdgpu_ci_read_smc_sram_dword(adev,
1404 pi->soft_regs_start + reg_offset,
1405 value, pi->sram_end);
1406}
1407
1408static int ci_write_smc_soft_register(struct amdgpu_device *adev,
1409 u16 reg_offset, u32 value)
1410{
1411 struct ci_power_info *pi = ci_get_pi(adev);
1412
1413 return amdgpu_ci_write_smc_sram_dword(adev,
1414 pi->soft_regs_start + reg_offset,
1415 value, pi->sram_end);
1416}
1417
1418static void ci_init_fps_limits(struct amdgpu_device *adev)
1419{
1420 struct ci_power_info *pi = ci_get_pi(adev);
1421 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
1422
1423 if (pi->caps_fps) {
1424 u16 tmp;
1425
1426 tmp = 45;
1427 table->FpsHighT = cpu_to_be16(tmp);
1428
1429 tmp = 30;
1430 table->FpsLowT = cpu_to_be16(tmp);
1431 }
1432}
1433
1434static int ci_update_sclk_t(struct amdgpu_device *adev)
1435{
1436 struct ci_power_info *pi = ci_get_pi(adev);
1437 int ret = 0;
1438 u32 low_sclk_interrupt_t = 0;
1439
1440 if (pi->caps_sclk_throttle_low_notification) {
1441 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
1442
1443 ret = amdgpu_ci_copy_bytes_to_smc(adev,
1444 pi->dpm_table_start +
1445 offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
1446 (u8 *)&low_sclk_interrupt_t,
1447 sizeof(u32), pi->sram_end);
1448
1449 }
1450
1451 return ret;
1452}
1453
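/* Query the VBIOS for the real VDDC/VDDCI values behind each virtual
 * leakage voltage ID, using EVV when supported or the leakage table
 * otherwise.
 */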
1454static void ci_get_leakage_voltages(struct amdgpu_device *adev)
1455{
1456 struct ci_power_info *pi = ci_get_pi(adev);
1457 u16 leakage_id, virtual_voltage_id;
1458 u16 vddc, vddci;
1459 int i;
1460
1461 pi->vddc_leakage.count = 0;
1462 pi->vddci_leakage.count = 0;
1463
1464 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
1465 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1466 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1467 if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
1468 continue;
1469 if (vddc != 0 && vddc != virtual_voltage_id) {
1470 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1471 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1472 pi->vddc_leakage.count++;
1473 }
1474 }
1475 } else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
1476 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1477 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1478 if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
1479 virtual_voltage_id,
1480 leakage_id) == 0) {
1481 if (vddc != 0 && vddc != virtual_voltage_id) {
1482 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1483 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1484 pi->vddc_leakage.count++;
1485 }
1486 if (vddci != 0 && vddci != virtual_voltage_id) {
1487 pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
1488 pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
1489 pi->vddci_leakage.count++;
1490 }
1491 }
1492 }
1493 }
1494}
1495
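/* Enable or disable thermal protection in GENERAL_PWRMGT based on the
 * active automatic throttle sources.
 */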
1496static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
1497{
1498 struct ci_power_info *pi = ci_get_pi(adev);
1499 bool want_thermal_protection;
1500 enum amdgpu_dpm_event_src dpm_event_src;
1501 u32 tmp;
1502
1503 switch (sources) {
1504 case 0:
1505 default:
1506 want_thermal_protection = false;
1507 break;
1508 case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
1509 want_thermal_protection = true;
1510 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
1511 break;
1512 case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1513 want_thermal_protection = true;
1514 dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
1515 break;
1516 case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1517 (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1518 want_thermal_protection = true;
1519 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1520 break;
1521 }
1522
1523 if (want_thermal_protection) {
1524#if 0
1525 /* XXX: need to figure out how to handle this properly */
1526 tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
1527 tmp &= DPM_EVENT_SRC_MASK;
1528 tmp |= DPM_EVENT_SRC(dpm_event_src);
1529 WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
1530#endif
1531
1532 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1533 if (pi->thermal_protection)
1534 tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1535 else
1536 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1537 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1538 } else {
1539 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1540 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1541 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1542 }
1543}
1544
1545static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
1546 enum amdgpu_dpm_auto_throttle_src source,
1547 bool enable)
1548{
1549 struct ci_power_info *pi = ci_get_pi(adev);
1550
1551 if (enable) {
1552 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1553 pi->active_auto_throttle_sources |= 1 << source;
1554 ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1555 }
1556 } else {
1557 if (pi->active_auto_throttle_sources & (1 << source)) {
1558 pi->active_auto_throttle_sources &= ~(1 << source);
1559 ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1560 }
1561 }
1562}
1563
1564static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
1565{
1566 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1567 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1568}
1569
1570static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1571{
1572 struct ci_power_info *pi = ci_get_pi(adev);
1573 PPSMC_Result smc_result;
1574
1575 if (!pi->need_update_smu7_dpm_table)
1576 return 0;
1577
1578 if ((!pi->sclk_dpm_key_disabled) &&
1579 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1580 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1581 if (smc_result != PPSMC_Result_OK)
1582 return -EINVAL;
1583 }
1584
1585 if ((!pi->mclk_dpm_key_disabled) &&
1586 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1587 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1588 if (smc_result != PPSMC_Result_OK)
1589 return -EINVAL;
1590 }
1591
1592 pi->need_update_smu7_dpm_table = 0;
1593 return 0;
1594}
1595
1596static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
1597{
1598 struct ci_power_info *pi = ci_get_pi(adev);
1599 PPSMC_Result smc_result;
1600
1601 if (enable) {
1602 if (!pi->sclk_dpm_key_disabled) {
1603 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
1604 if (smc_result != PPSMC_Result_OK)
1605 return -EINVAL;
1606 }
1607
1608 if (!pi->mclk_dpm_key_disabled) {
1609 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
1610 if (smc_result != PPSMC_Result_OK)
1611 return -EINVAL;
1612
1613 WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
1614 ~MC_SEQ_CNTL_3__CAC_EN_MASK);
1615
1616 WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
1617 WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
1618 WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);
1619
1620 udelay(10);
1621
1622 WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
1623 WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
1624 WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
1625 }
1626 } else {
1627 if (!pi->sclk_dpm_key_disabled) {
1628 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
1629 if (smc_result != PPSMC_Result_OK)
1630 return -EINVAL;
1631 }
1632
1633 if (!pi->mclk_dpm_key_disabled) {
1634 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
1635 if (smc_result != PPSMC_Result_OK)
1636 return -EINVAL;
1637 }
1638 }
1639
1640 return 0;
1641}
1642
1643static int ci_start_dpm(struct amdgpu_device *adev)
1644{
1645 struct ci_power_info *pi = ci_get_pi(adev);
1646 PPSMC_Result smc_result;
1647 int ret;
1648 u32 tmp;
1649
1650 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1651 tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1652 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1653
1654 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1655 tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1656 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1657
1658 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1659
1660 WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);
1661
1662 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
1663 if (smc_result != PPSMC_Result_OK)
1664 return -EINVAL;
1665
1666 ret = ci_enable_sclk_mclk_dpm(adev, true);
1667 if (ret)
1668 return ret;
1669
1670 if (!pi->pcie_dpm_key_disabled) {
1671 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
1672 if (smc_result != PPSMC_Result_OK)
1673 return -EINVAL;
1674 }
1675
1676 return 0;
1677}
1678
1679static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1680{
1681 struct ci_power_info *pi = ci_get_pi(adev);
1682 PPSMC_Result smc_result;
1683
1684 if (!pi->need_update_smu7_dpm_table)
1685 return 0;
1686
1687 if ((!pi->sclk_dpm_key_disabled) &&
1688 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1689 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1690 if (smc_result != PPSMC_Result_OK)
1691 return -EINVAL;
1692 }
1693
1694 if ((!pi->mclk_dpm_key_disabled) &&
1695 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1696 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1697 if (smc_result != PPSMC_Result_OK)
1698 return -EINVAL;
1699 }
1700
1701 return 0;
1702}
1703
1704static int ci_stop_dpm(struct amdgpu_device *adev)
1705{
1706 struct ci_power_info *pi = ci_get_pi(adev);
1707 PPSMC_Result smc_result;
1708 int ret;
1709 u32 tmp;
1710
1711 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1712 tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1713 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1714
1715 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1716 tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1717 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1718
1719 if (!pi->pcie_dpm_key_disabled) {
1720 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
1721 if (smc_result != PPSMC_Result_OK)
1722 return -EINVAL;
1723 }
1724
1725 ret = ci_enable_sclk_mclk_dpm(adev, false);
1726 if (ret)
1727 return ret;
1728
1729 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
1730 if (smc_result != PPSMC_Result_OK)
1731 return -EINVAL;
1732
1733 return 0;
1734}
1735
1736static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
1737{
1738 u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1739
1740 if (enable)
1741 tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1742 else
1743 tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1744 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1745}
1746
1747#if 0
1748static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
1749 bool ac_power)
1750{
1751 struct ci_power_info *pi = ci_get_pi(adev);
1752 struct amdgpu_cac_tdp_table *cac_tdp_table =
1753 adev->pm.dpm.dyn_state.cac_tdp_table;
1754 u32 power_limit;
1755
1756 if (ac_power)
1757 power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1758 else
1759 power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1760
1761 ci_set_power_limit(adev, power_limit);
1762
1763 if (pi->caps_automatic_dc_transition) {
1764 if (ac_power)
1765 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
1766 else
1767 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
1768 }
1769
1770 return 0;
1771}
1772#endif
1773
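/*
 * SMC messages that carry an argument exchange it through the
 * SMC_MSG_ARG_0 mailbox register: the parameter is written before the
 * message is sent, and replies are read back from the same register.
 */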
1774static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
1775 PPSMC_Msg msg, u32 parameter)
1776{
1777 WREG32(mmSMC_MSG_ARG_0, parameter);
1778 return amdgpu_ci_send_msg_to_smc(adev, msg);
1779}
1780
1781static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
1782 PPSMC_Msg msg, u32 *parameter)
1783{
1784 PPSMC_Result smc_result;
1785
1786 smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
1787
1788 if ((smc_result == PPSMC_Result_OK) && parameter)
1789 *parameter = RREG32(mmSMC_MSG_ARG_0);
1790
1791 return smc_result;
1792}
1793
1794static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
1795{
1796 struct ci_power_info *pi = ci_get_pi(adev);
1797
1798 if (!pi->sclk_dpm_key_disabled) {
1799 PPSMC_Result smc_result =
1800 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1801 if (smc_result != PPSMC_Result_OK)
1802 return -EINVAL;
1803 }
1804
1805 return 0;
1806}
1807
1808static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
1809{
1810 struct ci_power_info *pi = ci_get_pi(adev);
1811
1812 if (!pi->mclk_dpm_key_disabled) {
1813 PPSMC_Result smc_result =
1814 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1815 if (smc_result != PPSMC_Result_OK)
1816 return -EINVAL;
1817 }
1818
1819 return 0;
1820}
1821
1822static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
1823{
1824 struct ci_power_info *pi = ci_get_pi(adev);
1825
1826 if (!pi->pcie_dpm_key_disabled) {
1827 PPSMC_Result smc_result =
1828 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1829 if (smc_result != PPSMC_Result_OK)
1830 return -EINVAL;
1831 }
1832
1833 return 0;
1834}
1835
1836static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
1837{
1838 struct ci_power_info *pi = ci_get_pi(adev);
1839
1840 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1841 PPSMC_Result smc_result =
1842 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
1843 if (smc_result != PPSMC_Result_OK)
1844 return -EINVAL;
1845 }
1846
1847 return 0;
1848}
1849
1850static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
1851 u32 target_tdp)
1852{
1853 PPSMC_Result smc_result =
1854 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1855 if (smc_result != PPSMC_Result_OK)
1856 return -EINVAL;
1857 return 0;
1858}
1859
1860#if 0
1861static int ci_set_boot_state(struct amdgpu_device *adev)
1862{
1863 return ci_enable_sclk_mclk_dpm(adev, false);
1864}
1865#endif
1866
1867static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
1868{
1869 u32 sclk_freq;
1870 PPSMC_Result smc_result =
1871 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1872 PPSMC_MSG_API_GetSclkFrequency,
1873 &sclk_freq);
1874 if (smc_result != PPSMC_Result_OK)
1875 sclk_freq = 0;
1876
1877 return sclk_freq;
1878}
1879
1880static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
1881{
1882 u32 mclk_freq;
1883 PPSMC_Result smc_result =
1884 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1885 PPSMC_MSG_API_GetMclkFrequency,
1886 &mclk_freq);
1887 if (smc_result != PPSMC_Result_OK)
1888 mclk_freq = 0;
1889
1890 return mclk_freq;
1891}
1892
1893static void ci_dpm_start_smc(struct amdgpu_device *adev)
1894{
1895 int i;
1896
1897 amdgpu_ci_program_jump_on_start(adev);
1898 amdgpu_ci_start_smc_clock(adev);
1899 amdgpu_ci_start_smc(adev);
1900 for (i = 0; i < adev->usec_timeout; i++) {
1901 if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
1902 break;
1903 }
1904}
1905
1906static void ci_dpm_stop_smc(struct amdgpu_device *adev)
1907{
1908 amdgpu_ci_reset_smc(adev);
1909 amdgpu_ci_stop_smc_clock(adev);
1910}
1911
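/*
 * Read the SMU7 firmware header from SMC SRAM and cache the offsets of
 * the DPM table, soft registers, MC register table, fan table and MC ARB
 * DRAM timing table for the later table uploads.
 */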
1912static int ci_process_firmware_header(struct amdgpu_device *adev)
1913{
1914 struct ci_power_info *pi = ci_get_pi(adev);
1915 u32 tmp;
1916 int ret;
1917
1918 ret = amdgpu_ci_read_smc_sram_dword(adev,
1919 SMU7_FIRMWARE_HEADER_LOCATION +
1920 offsetof(SMU7_Firmware_Header, DpmTable),
1921 &tmp, pi->sram_end);
1922 if (ret)
1923 return ret;
1924
1925 pi->dpm_table_start = tmp;
1926
1927 ret = amdgpu_ci_read_smc_sram_dword(adev,
1928 SMU7_FIRMWARE_HEADER_LOCATION +
1929 offsetof(SMU7_Firmware_Header, SoftRegisters),
1930 &tmp, pi->sram_end);
1931 if (ret)
1932 return ret;
1933
1934 pi->soft_regs_start = tmp;
1935
1936 ret = amdgpu_ci_read_smc_sram_dword(adev,
1937 SMU7_FIRMWARE_HEADER_LOCATION +
1938 offsetof(SMU7_Firmware_Header, mcRegisterTable),
1939 &tmp, pi->sram_end);
1940 if (ret)
1941 return ret;
1942
1943 pi->mc_reg_table_start = tmp;
1944
1945 ret = amdgpu_ci_read_smc_sram_dword(adev,
1946 SMU7_FIRMWARE_HEADER_LOCATION +
1947 offsetof(SMU7_Firmware_Header, FanTable),
1948 &tmp, pi->sram_end);
1949 if (ret)
1950 return ret;
1951
1952 pi->fan_table_start = tmp;
1953
1954 ret = amdgpu_ci_read_smc_sram_dword(adev,
1955 SMU7_FIRMWARE_HEADER_LOCATION +
1956 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1957 &tmp, pi->sram_end);
1958 if (ret)
1959 return ret;
1960
1961 pi->arb_table_start = tmp;
1962
1963 return 0;
1964}
1965
1966static void ci_read_clock_registers(struct amdgpu_device *adev)
1967{
1968 struct ci_power_info *pi = ci_get_pi(adev);
1969
1970 pi->clock_registers.cg_spll_func_cntl =
1971 RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
1972 pi->clock_registers.cg_spll_func_cntl_2 =
1973 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
1974 pi->clock_registers.cg_spll_func_cntl_3 =
1975 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
1976 pi->clock_registers.cg_spll_func_cntl_4 =
1977 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
1978 pi->clock_registers.cg_spll_spread_spectrum =
1979 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
1980 pi->clock_registers.cg_spll_spread_spectrum_2 =
1981 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
1982 pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
1983 pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
1984 pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
1985 pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
1986 pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
1987 pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
1988 pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
1989 pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
1990 pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
1991}
1992
1993static void ci_init_sclk_t(struct amdgpu_device *adev)
1994{
1995 struct ci_power_info *pi = ci_get_pi(adev);
1996
1997 pi->low_sclk_interrupt_t = 0;
1998}
1999
2000static void ci_enable_thermal_protection(struct amdgpu_device *adev,
2001 bool enable)
2002{
2003 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2004
2005 if (enable)
2006 tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2007 else
2008 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2009 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2010}
2011
2012static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
2013{
2014 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2015
2016 tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
2017
2018 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2019}
2020
2021#if 0
2022static int ci_enter_ulp_state(struct amdgpu_device *adev)
2023{
2024
2025 WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
2026
2027 udelay(25000);
2028
2029 return 0;
2030}
2031
2032static int ci_exit_ulp_state(struct amdgpu_device *adev)
2033{
2034 int i;
2035
2036 WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
2037
2038 udelay(7000);
2039
2040 for (i = 0; i < adev->usec_timeout; i++) {
2041 if (RREG32(mmSMC_RESP_0) == 1)
2042 break;
2043 udelay(1000);
2044 }
2045
2046 return 0;
2047}
2048#endif
2049
2050static int ci_notify_smc_display_change(struct amdgpu_device *adev,
2051 bool has_display)
2052{
2053 PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
2054
2055 return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
2056}
2057
2058static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
2059 bool enable)
2060{
2061 struct ci_power_info *pi = ci_get_pi(adev);
2062
2063 if (enable) {
2064 if (pi->caps_sclk_ds) {
2065 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
2066 return -EINVAL;
2067 } else {
2068 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2069 return -EINVAL;
2070 }
2071 } else {
2072 if (pi->caps_sclk_ds) {
2073 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2074 return -EINVAL;
2075 }
2076 }
2077
2078 return 0;
2079}
2080
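/*
 * Program the display gap logic: with active CRTCs, clock switching is
 * limited to the vblank (or watermark) window.  The pre-vblank time is
 * derived from the current refresh rate and vblank time and converted to
 * reference clock units for CG_DISPLAY_GAP_CNTL2.
 */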
2081static void ci_program_display_gap(struct amdgpu_device *adev)
2082{
2083 u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2084 u32 pre_vbi_time_in_us;
2085 u32 frame_time_in_us;
2086 u32 ref_clock = adev->clock.spll.reference_freq;
2087 u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
2088 u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
2089
2090 tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
2091 if (adev->pm.dpm.new_active_crtc_count > 0)
2092 tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2093 else
2094 tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2095 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2096
2097 if (refresh_rate == 0)
2098 refresh_rate = 60;
2099 if (vblank_time == 0xffffffff)
2100 vblank_time = 500;
2101 frame_time_in_us = 1000000 / refresh_rate;
2102 pre_vbi_time_in_us =
2103 frame_time_in_us - 200 - vblank_time;
2104 tmp = pre_vbi_time_in_us * (ref_clock / 100);
2105
2106 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
2107 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
2108 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
2109
2110
2111 ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
2112
2113}
2114
2115static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
2116{
2117 struct ci_power_info *pi = ci_get_pi(adev);
2118 u32 tmp;
2119
2120 if (enable) {
2121 if (pi->caps_sclk_ss_support) {
2122 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2123 tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2124 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2125 }
2126 } else {
2127 tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2128 tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
2129 WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);
2130
2131 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2132 tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2133 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2134 }
2135}
2136
2137static void ci_program_sstp(struct amdgpu_device *adev)
2138{
2139 WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
2140 ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
2141 (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
2142}
2143
2144static void ci_enable_display_gap(struct amdgpu_device *adev)
2145{
2146 u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2147
2148 tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
2149 CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
2150 tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
2151 (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
2152
2153 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2154}
2155
2156static void ci_program_vc(struct amdgpu_device *adev)
2157{
2158 u32 tmp;
2159
2160 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2161 tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2162 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2163
2164 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
2165 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
2166 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
2167 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
2168 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
2169 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
2170 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
2171 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
2172}
2173
2174static void ci_clear_vc(struct amdgpu_device *adev)
2175{
2176 u32 tmp;
2177
2178 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2179 tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2180 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2181
2182 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
2183 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
2184 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
2185 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
2186 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
2187 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
2188 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
2189 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
2190}
2191
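/*
 * Wait for the SMC boot sequence to complete, then hold the SMC in reset
 * with its clock stopped and copy the SMC microcode into SRAM.
 */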
2192static int ci_upload_firmware(struct amdgpu_device *adev)
2193{
2194 struct ci_power_info *pi = ci_get_pi(adev);
2195 int i, ret;
2196
2197 for (i = 0; i < adev->usec_timeout; i++) {
2198 if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
2199 break;
2200 }
2201 WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);
2202
2203 amdgpu_ci_stop_smc_clock(adev);
2204 amdgpu_ci_reset_smc(adev);
2205
2206 ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end);
2207
2208 return ret;
2209
2210}
2211
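/*
 * For SVI2-controlled rails the voltage table is built directly from the
 * clock/voltage dependency table rather than a vbios GPIO lookup table;
 * the SMIO fields are left at zero.
 */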
2212static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
2213 struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
2214 struct atom_voltage_table *voltage_table)
2215{
2216 u32 i;
2217
2218 if (voltage_dependency_table == NULL)
2219 return -EINVAL;
2220
2221 voltage_table->mask_low = 0;
2222 voltage_table->phase_delay = 0;
2223
2224 voltage_table->count = voltage_dependency_table->count;
2225 for (i = 0; i < voltage_table->count; i++) {
2226 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2227 voltage_table->entries[i].smio_low = 0;
2228 }
2229
2230 return 0;
2231}
2232
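/*
 * Build the VDDC, VDDCI and MVDD voltage tables from either the vbios
 * GPIO lookup tables or the SVI2 dependency tables, and trim each table
 * to the number of levels the SMC state table can hold.
 */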
2233static int ci_construct_voltage_tables(struct amdgpu_device *adev)
2234{
2235 struct ci_power_info *pi = ci_get_pi(adev);
2236 int ret;
2237
2238 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2239 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
2240 VOLTAGE_OBJ_GPIO_LUT,
2241 &pi->vddc_voltage_table);
2242 if (ret)
2243 return ret;
2244 } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2245 ret = ci_get_svi2_voltage_table(adev,
2246 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2247 &pi->vddc_voltage_table);
2248 if (ret)
2249 return ret;
2250 }
2251
2252 if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2253 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
2254 &pi->vddc_voltage_table);
2255
2256 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2257 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
2258 VOLTAGE_OBJ_GPIO_LUT,
2259 &pi->vddci_voltage_table);
2260 if (ret)
2261 return ret;
2262 } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2263 ret = ci_get_svi2_voltage_table(adev,
2264 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2265 &pi->vddci_voltage_table);
2266 if (ret)
2267 return ret;
2268 }
2269
2270 if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2271 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
2272 &pi->vddci_voltage_table);
2273
2274 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2275 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
2276 VOLTAGE_OBJ_GPIO_LUT,
2277 &pi->mvdd_voltage_table);
2278 if (ret)
2279 return ret;
2280 } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2281 ret = ci_get_svi2_voltage_table(adev,
2282 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2283 &pi->mvdd_voltage_table);
2284 if (ret)
2285 return ret;
2286 }
2287
2288 if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2289 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
2290 &pi->mvdd_voltage_table);
2291
2292 return 0;
2293}
2294
2295static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
2296 struct atom_voltage_table_entry *voltage_table,
2297 SMU7_Discrete_VoltageLevel *smc_voltage_table)
2298{
2299 int ret;
2300
2301 ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
2302 &smc_voltage_table->StdVoltageHiSidd,
2303 &smc_voltage_table->StdVoltageLoSidd);
2304
2305 if (ret) {
2306 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2307 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2308 }
2309
2310 smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2311 smc_voltage_table->StdVoltageHiSidd =
2312 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2313 smc_voltage_table->StdVoltageLoSidd =
2314 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2315}
2316
2317static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
2318 SMU7_Discrete_DpmTable *table)
2319{
2320 struct ci_power_info *pi = ci_get_pi(adev);
2321 unsigned int count;
2322
2323 table->VddcLevelCount = pi->vddc_voltage_table.count;
2324 for (count = 0; count < table->VddcLevelCount; count++) {
2325 ci_populate_smc_voltage_table(adev,
2326 &pi->vddc_voltage_table.entries[count],
2327 &table->VddcLevel[count]);
2328
2329 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2330 table->VddcLevel[count].Smio |=
2331 pi->vddc_voltage_table.entries[count].smio_low;
2332 else
2333 table->VddcLevel[count].Smio = 0;
2334 }
2335 table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2336
2337 return 0;
2338}
2339
2340static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
2341 SMU7_Discrete_DpmTable *table)
2342{
2343 unsigned int count;
2344 struct ci_power_info *pi = ci_get_pi(adev);
2345
2346 table->VddciLevelCount = pi->vddci_voltage_table.count;
2347 for (count = 0; count < table->VddciLevelCount; count++) {
2348 ci_populate_smc_voltage_table(adev,
2349 &pi->vddci_voltage_table.entries[count],
2350 &table->VddciLevel[count]);
2351
2352 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2353 table->VddciLevel[count].Smio |=
2354 pi->vddci_voltage_table.entries[count].smio_low;
2355 else
2356 table->VddciLevel[count].Smio = 0;
2357 }
2358 table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2359
2360 return 0;
2361}
2362
2363static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
2364 SMU7_Discrete_DpmTable *table)
2365{
2366 struct ci_power_info *pi = ci_get_pi(adev);
2367 unsigned int count;
2368
2369 table->MvddLevelCount = pi->mvdd_voltage_table.count;
2370 for (count = 0; count < table->MvddLevelCount; count++) {
2371 ci_populate_smc_voltage_table(adev,
2372 &pi->mvdd_voltage_table.entries[count],
2373 &table->MvddLevel[count]);
2374
2375 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2376 table->MvddLevel[count].Smio |=
2377 pi->mvdd_voltage_table.entries[count].smio_low;
2378 else
2379 table->MvddLevel[count].Smio = 0;
2380 }
2381 table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2382
2383 return 0;
2384}
2385
2386static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
2387 SMU7_Discrete_DpmTable *table)
2388{
2389 int ret;
2390
2391 ret = ci_populate_smc_vddc_table(adev, table);
2392 if (ret)
2393 return ret;
2394
2395 ret = ci_populate_smc_vddci_table(adev, table);
2396 if (ret)
2397 return ret;
2398
2399 ret = ci_populate_smc_mvdd_table(adev, table);
2400 if (ret)
2401 return ret;
2402
2403 return 0;
2404}
2405
2406static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
2407 SMU7_Discrete_VoltageLevel *voltage)
2408{
2409 struct ci_power_info *pi = ci_get_pi(adev);
2410 u32 i = 0;
2411
2412 if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2413 for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2414 if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2415 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2416 break;
2417 }
2418 }
2419
2420 if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2421 return -EINVAL;
2422 }
2423
2424 return -EINVAL;
2425}
2426
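/*
 * Look up the leakage-compensated "standard" voltages for a VDDC value:
 * find the matching (or next higher) entry in the vddc-on-sclk dependency
 * table and take the hi/lo SIDD values from the CAC leakage table,
 * falling back to the raw voltage when no leakage data is available.
 */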
2427static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
2428 struct atom_voltage_table_entry *voltage_table,
2429 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2430{
2431 u16 v_index, idx;
2432 bool voltage_found = false;
2433 *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2434 *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2435
2436 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2437 return -EINVAL;
2438
2439 if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2440 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2441 if (voltage_table->value ==
2442 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2443 voltage_found = true;
2444 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2445 idx = v_index;
2446 else
2447 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2448 *std_voltage_lo_sidd =
2449 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2450 *std_voltage_hi_sidd =
2451 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2452 break;
2453 }
2454 }
2455
2456 if (!voltage_found) {
2457 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2458 if (voltage_table->value <=
2459 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2460 voltage_found = true;
2461 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2462 idx = v_index;
2463 else
2464 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2465 *std_voltage_lo_sidd =
2466 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2467 *std_voltage_hi_sidd =
2468 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2469 break;
2470 }
2471 }
2472 }
2473 }
2474
2475 return 0;
2476}
2477
2478static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
2479 const struct amdgpu_phase_shedding_limits_table *limits,
2480 u32 sclk,
2481 u32 *phase_shedding)
2482{
2483 unsigned int i;
2484
2485 *phase_shedding = 1;
2486
2487 for (i = 0; i < limits->count; i++) {
2488 if (sclk < limits->entries[i].sclk) {
2489 *phase_shedding = i;
2490 break;
2491 }
2492 }
2493}
2494
2495static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
2496 const struct amdgpu_phase_shedding_limits_table *limits,
2497 u32 mclk,
2498 u32 *phase_shedding)
2499{
2500 unsigned int i;
2501
2502 *phase_shedding = 1;
2503
2504 for (i = 0; i < limits->count; i++) {
2505 if (mclk < limits->entries[i].mclk) {
2506 *phase_shedding = i;
2507 break;
2508 }
2509 }
2510}
2511
2512static int ci_init_arb_table_index(struct amdgpu_device *adev)
2513{
2514 struct ci_power_info *pi = ci_get_pi(adev);
2515 u32 tmp;
2516 int ret;
2517
2518 ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
2519 &tmp, pi->sram_end);
2520 if (ret)
2521 return ret;
2522
2523 tmp &= 0x00FFFFFF;
2524 tmp |= MC_CG_ARB_FREQ_F1 << 24;
2525
2526 return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
2527 tmp, pi->sram_end);
2528}
2529
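/*
 * Return the voltage required for a given clock: the first dependency
 * table entry whose clock is >= the requested clock, or the highest
 * entry if the clock exceeds the table.
 */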
2530static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
2531 struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
2532 u32 clock, u32 *voltage)
2533{
2534 u32 i = 0;
2535
2536 if (allowed_clock_voltage_table->count == 0)
2537 return -EINVAL;
2538
2539 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2540 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2541 *voltage = allowed_clock_voltage_table->entries[i].v;
2542 return 0;
2543 }
2544 }
2545
2546 *voltage = allowed_clock_voltage_table->entries[i-1].v;
2547
2548 return 0;
2549}
2550
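/*
 * Pick the largest deep sleep divider id such that sclk / (1 << id)
 * stays at or above the minimum engine clock.
 */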
2551static u8 ci_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
2552 u32 sclk, u32 min_sclk_in_sr)
2553{
2554 u32 i;
2555 u32 tmp;
2556 u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
2557 min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
2558
2559 if (sclk < min)
2560 return 0;
2561
2562 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
2563 tmp = sclk / (1 << i);
2564 if (tmp >= min || i == 0)
2565 break;
2566 }
2567
2568 return (u8)i;
2569}
2570
2571static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
2572{
2573 return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2574}
2575
2576static int ci_reset_to_default(struct amdgpu_device *adev)
2577{
2578 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2579 0 : -EINVAL;
2580}
2581
2582static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
2583{
2584 u32 tmp;
2585
2586 tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
2587
2588 if (tmp == MC_CG_ARB_FREQ_F0)
2589 return 0;
2590
2591 return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
2592}
2593
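/*
 * Workaround for specific boards (device ids 0x67B0/0x67B1, Hawaii
 * variants): for certain memory clock ranges, patch bits 23:16 of the
 * DRAM timing2 value so they scale with the engine clock.
 */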
2594static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
2595 const u32 engine_clock,
2596 const u32 memory_clock,
2597 u32 *dram_timing2)
2598{
2599 bool patch;
2600 u32 tmp, tmp2;
2601
2602 tmp = RREG32(mmMC_SEQ_MISC0);
2603 patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
2604
2605 if (patch &&
2606 ((adev->pdev->device == 0x67B0) ||
2607 (adev->pdev->device == 0x67B1))) {
2608 if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2609 tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2610 *dram_timing2 &= ~0x00ff0000;
2611 *dram_timing2 |= tmp2 << 16;
2612 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2613 tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2614 *dram_timing2 &= ~0x00ff0000;
2615 *dram_timing2 |= tmp2 << 16;
2616 }
2617 }
2618}
2619
2620static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
2621 u32 sclk,
2622 u32 mclk,
2623 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2624{
2625 u32 dram_timing;
2626 u32 dram_timing2;
2627 u32 burst_time;
2628
2629 amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
2630
2631 dram_timing = RREG32(mmMC_ARB_DRAM_TIMING);
2632 dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
2633 burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
2634
2635 ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
2636
2637 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
2638 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2639 arb_regs->McArbBurstTime = (u8)burst_time;
2640
2641 return 0;
2642}
2643
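/*
 * Build the MC ARB DRAM timing table for every sclk/mclk combination and
 * upload it to SMC SRAM at the ARB table offset read from the firmware
 * header.
 */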
2644static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
2645{
2646 struct ci_power_info *pi = ci_get_pi(adev);
2647 SMU7_Discrete_MCArbDramTimingTable arb_regs;
2648 u32 i, j;
2649 int ret = 0;
2650
2651 memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2652
2653 for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2654 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2655 ret = ci_populate_memory_timing_parameters(adev,
2656 pi->dpm_table.sclk_table.dpm_levels[i].value,
2657 pi->dpm_table.mclk_table.dpm_levels[j].value,
2658 &arb_regs.entries[i][j]);
2659 if (ret)
2660 break;
2661 }
2662 }
2663
2664 if (ret == 0)
2665 ret = amdgpu_ci_copy_bytes_to_smc(adev,
2666 pi->arb_table_start,
2667 (u8 *)&arb_regs,
2668 sizeof(SMU7_Discrete_MCArbDramTimingTable),
2669 pi->sram_end);
2670
2671 return ret;
2672}
2673
2674static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
2675{
2676 struct ci_power_info *pi = ci_get_pi(adev);
2677
2678 if (pi->need_update_smu7_dpm_table == 0)
2679 return 0;
2680
2681 return ci_do_program_memory_timing_parameters(adev);
2682}
2683
2684static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
2685 struct amdgpu_ps *amdgpu_boot_state)
2686{
2687 struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
2688 struct ci_power_info *pi = ci_get_pi(adev);
2689 u32 level = 0;
2690
2691 for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2692 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2693 boot_state->performance_levels[0].sclk) {
2694 pi->smc_state_table.GraphicsBootLevel = level;
2695 break;
2696 }
2697 }
2698
2699 for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2700 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2701 boot_state->performance_levels[0].mclk) {
2702 pi->smc_state_table.MemoryBootLevel = level;
2703 break;
2704 }
2705 }
2706}
2707
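/*
 * Convert the per-level enabled flags of a DPM table into a bitmask,
 * bit N corresponding to dpm_levels[N].
 */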
2708static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2709{
2710 u32 i;
2711 u32 mask_value = 0;
2712
2713 for (i = dpm_table->count; i > 0; i--) {
2714 mask_value = mask_value << 1;
2715 if (dpm_table->dpm_levels[i-1].enabled)
2716 mask_value |= 0x1;
2717 else
2718 mask_value &= 0xFFFFFFFE;
2719 }
2720
2721 return mask_value;
2722}
2723
2724static void ci_populate_smc_link_level(struct amdgpu_device *adev,
2725 SMU7_Discrete_DpmTable *table)
2726{
2727 struct ci_power_info *pi = ci_get_pi(adev);
2728 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2729 u32 i;
2730
2731 for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2732 table->LinkLevel[i].PcieGenSpeed =
2733 (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2734 table->LinkLevel[i].PcieLaneCount =
2735 amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2736 table->LinkLevel[i].EnabledForActivity = 1;
2737 table->LinkLevel[i].DownT = cpu_to_be32(5);
2738 table->LinkLevel[i].UpT = cpu_to_be32(30);
2739 }
2740
2741 pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2742 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2743 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2744}
2745
2746static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
2747 SMU7_Discrete_DpmTable *table)
2748{
2749 u32 count;
2750 struct atom_clock_dividers dividers;
2751 int ret = -EINVAL;
2752
2753 table->UvdLevelCount =
2754 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2755
2756 for (count = 0; count < table->UvdLevelCount; count++) {
2757 table->UvdLevel[count].VclkFrequency =
2758 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2759 table->UvdLevel[count].DclkFrequency =
2760 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2761 table->UvdLevel[count].MinVddc =
2762 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2763 table->UvdLevel[count].MinVddcPhases = 1;
2764
2765 ret = amdgpu_atombios_get_clock_dividers(adev,
2766 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2767 table->UvdLevel[count].VclkFrequency, false, &dividers);
2768 if (ret)
2769 return ret;
2770
2771 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2772
2773 ret = amdgpu_atombios_get_clock_dividers(adev,
2774 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2775 table->UvdLevel[count].DclkFrequency, false, &dividers);
2776 if (ret)
2777 return ret;
2778
2779 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2780
2781 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2782 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2783 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2784 }
2785
2786 return ret;
2787}
2788
2789static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
2790 SMU7_Discrete_DpmTable *table)
2791{
2792 u32 count;
2793 struct atom_clock_dividers dividers;
2794 int ret = -EINVAL;
2795
2796 table->VceLevelCount =
2797 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2798
2799 for (count = 0; count < table->VceLevelCount; count++) {
2800 table->VceLevel[count].Frequency =
2801 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2802 table->VceLevel[count].MinVoltage =
2803 (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2804 table->VceLevel[count].MinPhases = 1;
2805
2806 ret = amdgpu_atombios_get_clock_dividers(adev,
2807 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2808 table->VceLevel[count].Frequency, false, &dividers);
2809 if (ret)
2810 return ret;
2811
2812 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2813
2814 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2815 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2816 }
2817
2818 return ret;
2819
2820}
2821
2822static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
2823 SMU7_Discrete_DpmTable *table)
2824{
2825 u32 count;
2826 struct atom_clock_dividers dividers;
2827 int ret = -EINVAL;
2828
2829 table->AcpLevelCount = (u8)
2830 (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2831
2832 for (count = 0; count < table->AcpLevelCount; count++) {
2833 table->AcpLevel[count].Frequency =
2834 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2835 table->AcpLevel[count].MinVoltage =
2836 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2837 table->AcpLevel[count].MinPhases = 1;
2838
2839 ret = amdgpu_atombios_get_clock_dividers(adev,
2840 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2841 table->AcpLevel[count].Frequency, false, &dividers);
2842 if (ret)
2843 return ret;
2844
2845 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2846
2847 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2848 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2849 }
2850
2851 return ret;
2852}
2853
2854static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
2855 SMU7_Discrete_DpmTable *table)
2856{
2857 u32 count;
2858 struct atom_clock_dividers dividers;
2859 int ret = -EINVAL;
2860
2861 table->SamuLevelCount =
2862 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2863
2864 for (count = 0; count < table->SamuLevelCount; count++) {
2865 table->SamuLevel[count].Frequency =
2866 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2867 table->SamuLevel[count].MinVoltage =
2868 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2869 table->SamuLevel[count].MinPhases = 1;
2870
2871 ret = amdgpu_atombios_get_clock_dividers(adev,
2872 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2873 table->SamuLevel[count].Frequency, false, &dividers);
2874 if (ret)
2875 return ret;
2876
2877 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2878
2879 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2880 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2881 }
2882
2883 return ret;
2884}
2885
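/*
 * Translate a memory clock into the MPLL register values the SMC will
 * program: feedback/post dividers from the vbios, the GDDR5 DQ
 * post-divider setup, optional spread spectrum parameters and the DLL
 * speed/power-down bits.
 */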
2886static int ci_calculate_mclk_params(struct amdgpu_device *adev,
2887 u32 memory_clock,
2888 SMU7_Discrete_MemoryLevel *mclk,
2889 bool strobe_mode,
2890 bool dll_state_on)
2891{
2892 struct ci_power_info *pi = ci_get_pi(adev);
2893 u32 dll_cntl = pi->clock_registers.dll_cntl;
2894 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2895 u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2896 u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2897 u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2898 u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2899 u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2900 u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
2901 u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
2902 struct atom_mpll_param mpll_param;
2903 int ret;
2904
2905 ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
2906 if (ret)
2907 return ret;
2908
2909 mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
2910 mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);
2911
2912 mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
2913 MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
2914 mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT |
2915 (mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
2916 (mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);
2917
2918 mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
2919 mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2920
2921 	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
2922 mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
2923 MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
2924 mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
2925 (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2926 }
2927
2928 if (pi->caps_mclk_ss_support) {
2929 struct amdgpu_atom_ss ss;
2930 u32 freq_nom;
2931 u32 tmp;
2932 u32 reference_clock = adev->clock.mpll.reference_freq;
2933
2934 if (mpll_param.qdr == 1)
2935 freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2936 else
2937 freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2938
2939 tmp = (freq_nom / reference_clock);
2940 tmp = tmp * tmp;
2941 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
2942 ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2943 u32 clks = reference_clock * 5 / ss.rate;
2944 u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2945
2946 mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
2947 mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);
2948
2949 mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
2950 mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
2951 }
2952 }
2953
2954 mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
2955 mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);
2956
2957 if (dll_state_on)
2958 mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2959 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
2960 else
2961 mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2962 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
2963
2964 mclk->MclkFrequency = memory_clock;
2965 mclk->MpllFuncCntl = mpll_func_cntl;
2966 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2967 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2968 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2969 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2970 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2971 mclk->DllCntl = dll_cntl;
2972 mclk->MpllSs1 = mpll_ss1;
2973 mclk->MpllSs2 = mpll_ss2;
2974
2975 return 0;
2976}
2977
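/*
 * Fill one SMC memory level: minimum rail voltages from the mclk
 * dependency tables, stutter/strobe/EDC enables based on the configured
 * thresholds and VRAM type, then the MPLL register set, with multi-byte
 * fields converted to the SMC's big-endian layout.
 */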
2978static int ci_populate_single_memory_level(struct amdgpu_device *adev,
2979 u32 memory_clock,
2980 SMU7_Discrete_MemoryLevel *memory_level)
2981{
2982 struct ci_power_info *pi = ci_get_pi(adev);
2983 int ret;
2984 bool dll_state_on;
2985
2986 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2987 ret = ci_get_dependency_volt_by_clk(adev,
2988 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2989 memory_clock, &memory_level->MinVddc);
2990 if (ret)
2991 return ret;
2992 }
2993
2994 if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
2995 ret = ci_get_dependency_volt_by_clk(adev,
2996 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2997 memory_clock, &memory_level->MinVddci);
2998 if (ret)
2999 return ret;
3000 }
3001
3002 if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
3003 ret = ci_get_dependency_volt_by_clk(adev,
3004 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
3005 memory_clock, &memory_level->MinMvdd);
3006 if (ret)
3007 return ret;
3008 }
3009
3010 memory_level->MinVddcPhases = 1;
3011
3012 if (pi->vddc_phase_shed_control)
3013 ci_populate_phase_value_based_on_mclk(adev,
3014 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3015 memory_clock,
3016 &memory_level->MinVddcPhases);
3017
3018 memory_level->EnabledForThrottle = 1;
3019 memory_level->EnabledForActivity = 1;
3020 memory_level->UpH = 0;
3021 memory_level->DownH = 100;
3022 memory_level->VoltageDownH = 0;
3023 memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
3024
3025 memory_level->StutterEnable = false;
3026 memory_level->StrobeEnable = false;
3027 memory_level->EdcReadEnable = false;
3028 memory_level->EdcWriteEnable = false;
3029 memory_level->RttEnable = false;
3030
3031 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3032
3033 if (pi->mclk_stutter_mode_threshold &&
3034 (memory_clock <= pi->mclk_stutter_mode_threshold) &&
3035 (pi->uvd_enabled == false) &&
3036 (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
3037 (adev->pm.dpm.new_active_crtc_count <= 2))
3038 memory_level->StutterEnable = true;
3039
3040 if (pi->mclk_strobe_mode_threshold &&
3041 (memory_clock <= pi->mclk_strobe_mode_threshold))
3042 memory_level->StrobeEnable = 1;
3043
3044 	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
3045 memory_level->StrobeRatio =
3046 ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
3047 if (pi->mclk_edc_enable_threshold &&
3048 (memory_clock > pi->mclk_edc_enable_threshold))
3049 memory_level->EdcReadEnable = true;
3050
3051 if (pi->mclk_edc_wr_enable_threshold &&
3052 (memory_clock > pi->mclk_edc_wr_enable_threshold))
3053 memory_level->EdcWriteEnable = true;
3054
3055 if (memory_level->StrobeEnable) {
3056 if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
3057 ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
3058 dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
3059 else
3060 dll_state_on = ((RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
3061 } else {
3062 dll_state_on = pi->dll_default_on;
3063 }
3064 } else {
3065 memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
3066 dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
3067 }
3068
3069 ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
3070 if (ret)
3071 return ret;
3072
3073 memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
3074 memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
3075 memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
3076 memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
3077
3078 memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
3079 memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
3080 memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
3081 memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
3082 memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
3083 memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
3084 memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
3085 memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
3086 memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
3087 memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
3088 memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
3089
3090 return 0;
3091}
3092
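/*
 * Build the ACPI (lowest power) level: the SPLL is powered down and held
 * in reset with sclk taken from the reference clock, the memory DLL and
 * clocks are powered down, and the voltages come from the ACPI values
 * when available.
 */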
3093static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
3094 SMU7_Discrete_DpmTable *table)
3095{
3096 struct ci_power_info *pi = ci_get_pi(adev);
3097 struct atom_clock_dividers dividers;
3098 SMU7_Discrete_VoltageLevel voltage_level;
3099 u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
3100 u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
3101 u32 dll_cntl = pi->clock_registers.dll_cntl;
3102 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
3103 int ret;
3104
3105 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
3106
3107 if (pi->acpi_vddc)
3108 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
3109 else
3110 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
3111
3112 table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
3113
3114 table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;
3115
3116 ret = amdgpu_atombios_get_clock_dividers(adev,
3117 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3118 table->ACPILevel.SclkFrequency, false, &dividers);
3119 if (ret)
3120 return ret;
3121
3122 table->ACPILevel.SclkDid = (u8)dividers.post_divider;
3123 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3124 table->ACPILevel.DeepSleepDivId = 0;
3125
3126 spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
3127 spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
3128
3129 spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
3130 spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);
3131
3132 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
3133 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
3134 table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
3135 table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
3136 table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
3137 table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3138 table->ACPILevel.CcPwrDynRm = 0;
3139 table->ACPILevel.CcPwrDynRm1 = 0;
3140
3141 table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
3142 table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
3143 table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
3144 table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
3145 table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
3146 table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
3147 table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
3148 table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
3149 table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
3150 table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
3151 table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
3152
3153 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
3154 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
3155
3156 if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
3157 if (pi->acpi_vddci)
3158 table->MemoryACPILevel.MinVddci =
3159 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
3160 else
3161 table->MemoryACPILevel.MinVddci =
3162 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
3163 }
3164
3165 if (ci_populate_mvdd_value(adev, 0, &voltage_level))
3166 table->MemoryACPILevel.MinMvdd = 0;
3167 else
3168 table->MemoryACPILevel.MinMvdd =
3169 cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3170
3171 mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
3172 MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
3173 mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
3174 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
3175
3176 dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);
3177
3178 table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3179 table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3180 table->MemoryACPILevel.MpllAdFuncCntl =
3181 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3182 table->MemoryACPILevel.MpllDqFuncCntl =
3183 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3184 table->MemoryACPILevel.MpllFuncCntl =
3185 cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3186 table->MemoryACPILevel.MpllFuncCntl_1 =
3187 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3188 table->MemoryACPILevel.MpllFuncCntl_2 =
3189 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3190 table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3191 table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3192
3193 table->MemoryACPILevel.EnabledForThrottle = 0;
3194 table->MemoryACPILevel.EnabledForActivity = 0;
3195 table->MemoryACPILevel.UpH = 0;
3196 table->MemoryACPILevel.DownH = 100;
3197 table->MemoryACPILevel.VoltageDownH = 0;
3198 table->MemoryACPILevel.ActivityLevel =
3199 cpu_to_be16((u16)pi->mclk_activity_target);
3200
3201 table->MemoryACPILevel.StutterEnable = false;
3202 table->MemoryACPILevel.StrobeEnable = false;
3203 table->MemoryACPILevel.EdcReadEnable = false;
3204 table->MemoryACPILevel.EdcWriteEnable = false;
3205 table->MemoryACPILevel.RttEnable = false;
3206
3207 return 0;
3208}
3209
3210
3211static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
3212{
3213 struct ci_power_info *pi = ci_get_pi(adev);
3214 struct ci_ulv_parm *ulv = &pi->ulv;
3215
3216 if (ulv->supported) {
3217 if (enable)
3218 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3219 0 : -EINVAL;
3220 else
3221 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3222 0 : -EINVAL;
3223 }
3224
3225 return 0;
3226}
3227
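/*
 * Fill in the ULV state.  The ULV voltage (stashed in
 * backbias_response_time) is expressed as an offset below the lowest
 * vddc-on-sclk entry, either directly or as a VID step count for SVI2;
 * ULV support is disabled if the voltage is zero.
 */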
3228static int ci_populate_ulv_level(struct amdgpu_device *adev,
3229 SMU7_Discrete_Ulv *state)
3230{
3231 struct ci_power_info *pi = ci_get_pi(adev);
3232 u16 ulv_voltage = adev->pm.dpm.backbias_response_time;
3233
3234 state->CcPwrDynRm = 0;
3235 state->CcPwrDynRm1 = 0;
3236
3237 if (ulv_voltage == 0) {
3238 pi->ulv.supported = false;
3239 return 0;
3240 }
3241
3242 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3243 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3244 state->VddcOffset = 0;
3245 else
3246 state->VddcOffset =
3247 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3248 } else {
3249 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3250 state->VddcOffsetVid = 0;
3251 else
3252 state->VddcOffsetVid = (u8)
3253 ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3254 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3255 }
3256 state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3257
3258 state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3259 state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3260 state->VddcOffset = cpu_to_be16(state->VddcOffset);
3261
3262 return 0;
3263}
3264
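/*
 * Translate an engine clock into SPLL register values: the feedback
 * divider from the vbios dividers plus spread spectrum CLKS/CLKV terms
 * when engine spread spectrum is supported.
 */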
3265static int ci_calculate_sclk_params(struct amdgpu_device *adev,
3266 u32 engine_clock,
3267 SMU7_Discrete_GraphicsLevel *sclk)
3268{
3269 struct ci_power_info *pi = ci_get_pi(adev);
3270 struct atom_clock_dividers dividers;
3271 u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
3272 u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
3273 u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
3274 u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3275 u32 reference_clock = adev->clock.spll.reference_freq;
3276 u32 reference_divider;
3277 u32 fbdiv;
3278 int ret;
3279
3280 ret = amdgpu_atombios_get_clock_dividers(adev,
3281 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3282 engine_clock, false, &dividers);
3283 if (ret)
3284 return ret;
3285
3286 reference_divider = 1 + dividers.ref_div;
3287 fbdiv = dividers.fb_div & 0x3FFFFFF;
3288
3289 spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
3290 spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
3291 spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
3292
3293 if (pi->caps_sclk_ss_support) {
3294 struct amdgpu_atom_ss ss;
3295 u32 vco_freq = engine_clock * dividers.post_div;
3296
3297 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
3298 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
3299 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
3300 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
3301
3302 cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
3303 cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
3304 cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);
3305
3306 cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
3307 cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
3308 }
3309 }
3310
3311 sclk->SclkFrequency = engine_clock;
3312 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
3313 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
3314 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
3315 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
3316 sclk->SclkDid = (u8)dividers.post_divider;
3317
3318 return 0;
3319}
3320
3321static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
3322 u32 engine_clock,
3323 u16 sclk_activity_level_t,
3324 SMU7_Discrete_GraphicsLevel *graphic_level)
3325{
3326 struct ci_power_info *pi = ci_get_pi(adev);
3327 int ret;
3328
3329 ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
3330 if (ret)
3331 return ret;
3332
3333 ret = ci_get_dependency_volt_by_clk(adev,
3334 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3335 engine_clock, &graphic_level->MinVddc);
3336 if (ret)
3337 return ret;
3338
3339 graphic_level->SclkFrequency = engine_clock;
3340
3341 graphic_level->Flags = 0;
3342 graphic_level->MinVddcPhases = 1;
3343
3344 if (pi->vddc_phase_shed_control)
3345 ci_populate_phase_value_based_on_sclk(adev,
3346 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3347 engine_clock,
3348 &graphic_level->MinVddcPhases);
3349
3350 graphic_level->ActivityLevel = sclk_activity_level_t;
3351
3352 graphic_level->CcPwrDynRm = 0;
3353 graphic_level->CcPwrDynRm1 = 0;
3354 graphic_level->EnabledForThrottle = 1;
3355 graphic_level->UpH = 0;
3356 graphic_level->DownH = 0;
3357 graphic_level->VoltageDownH = 0;
3358 graphic_level->PowerThrottle = 0;
3359
3360 if (pi->caps_sclk_ds)
3361 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(adev,
3362 engine_clock,
3363 CISLAND_MINIMUM_ENGINE_CLOCK);
3364
3365 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3366
3367 graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
3368 graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
3369 graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
3370 graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
3371 graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
3372 graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
3373 graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
3374 graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
3375 graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
3376 graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
3377 graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
3378 graphic_level->EnabledForActivity = 1;
3379
3380 return 0;
3381}
3382
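/*
 * Build the full graphics (sclk) level table and copy it into SMC SRAM.
 * Deep sleep is only left enabled for the two lowest levels and the
 * highest level uses the high display watermark.
 */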
3383static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
3384{
3385 struct ci_power_info *pi = ci_get_pi(adev);
3386 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3387 u32 level_array_address = pi->dpm_table_start +
3388 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3389 u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3390 SMU7_MAX_LEVELS_GRAPHICS;
3391 SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3392 u32 i, ret;
3393
3394 memset(levels, 0, level_array_size);
3395
3396 for (i = 0; i < dpm_table->sclk_table.count; i++) {
3397 ret = ci_populate_single_graphic_level(adev,
3398 dpm_table->sclk_table.dpm_levels[i].value,
3399 (u16)pi->activity_target[i],
3400 &pi->smc_state_table.GraphicsLevel[i]);
3401 if (ret)
3402 return ret;
3403 if (i > 1)
3404 pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3405 if (i == (dpm_table->sclk_table.count - 1))
3406 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3407 PPSMC_DISPLAY_WATERMARK_HIGH;
3408 }
3409
3410 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3411 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3412 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3413
3414 ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3415 (u8 *)levels, level_array_size,
3416 pi->sram_end);
3417 if (ret)
3418 return ret;
3419
3420 return 0;
3421}
3422
3423static int ci_populate_ulv_state(struct amdgpu_device *adev,
3424 SMU7_Discrete_Ulv *ulv_level)
3425{
3426 return ci_populate_ulv_level(adev, ulv_level);
3427}
3428
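/*
 * ci_populate_all_memory_levels() mirrors the graphics path for the mclk
 * table: it rejects zero clocks, copies level 0's MinVddc/MinVddcPhases to
 * level 1 on 0x67B0/0x67B1 (Hawaii) when at least two levels exist, pins
 * level 0's activity level to 0x1F, flags the last level as the high display
 * watermark, and uploads the array to the MemoryLevel offset in SMC SRAM.
 */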
3429static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
3430{
3431 struct ci_power_info *pi = ci_get_pi(adev);
3432 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3433 u32 level_array_address = pi->dpm_table_start +
3434 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3435 u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3436 SMU7_MAX_LEVELS_MEMORY;
3437 SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3438 u32 i, ret;
3439
3440 memset(levels, 0, level_array_size);
3441
3442 for (i = 0; i < dpm_table->mclk_table.count; i++) {
3443 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3444 return -EINVAL;
3445 ret = ci_populate_single_memory_level(adev,
3446 dpm_table->mclk_table.dpm_levels[i].value,
3447 &pi->smc_state_table.MemoryLevel[i]);
3448 if (ret)
3449 return ret;
3450 }
3451
3452 if ((dpm_table->mclk_table.count >= 2) &&
3453 ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
3454 pi->smc_state_table.MemoryLevel[1].MinVddc =
3455 pi->smc_state_table.MemoryLevel[0].MinVddc;
3456 pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3457 pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3458 }
3459
3460 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3461
3462 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3463 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3464 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3465
3466 pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3467 PPSMC_DISPLAY_WATERMARK_HIGH;
3468
3469 ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3470 (u8 *)levels, level_array_size,
3471 pi->sram_end);
3472 if (ret)
3473 return ret;
3474
3475 return 0;
3476}
3477
3478static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
3479 struct ci_single_dpm_table* dpm_table,
3480 u32 count)
3481{
3482 u32 i;
3483
3484 dpm_table->count = count;
3485 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3486 dpm_table->dpm_levels[i].enabled = false;
3487}
3488
3489static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
3490 u32 index, u32 pcie_gen, u32 pcie_lanes)
3491{
3492 dpm_table->dpm_levels[index].value = pcie_gen;
3493 dpm_table->dpm_levels[index].param1 = pcie_lanes;
3494 dpm_table->dpm_levels[index].enabled = true;
3495}
3496
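/*
 * ci_setup_default_pcie_tables() builds a fixed six-entry PCIe DPM table from
 * the performance and powersaving gen/lane ranges, copying one set from the
 * other when only one of them is in use.  Entry 0 uses the powersaving
 * minimum gen (with max lanes on Bonaire, min lanes otherwise).
 */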
3497static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
3498{
3499 struct ci_power_info *pi = ci_get_pi(adev);
3500
3501 if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3502 return -EINVAL;
3503
3504 if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3505 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3506 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3507 } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3508 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3509 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3510 }
3511
3512 ci_reset_single_dpm_table(adev,
3513 &pi->dpm_table.pcie_speed_table,
3514 SMU7_MAX_LEVELS_LINK);
3515
3516 if (adev->asic_type == CHIP_BONAIRE)
3517 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3518 pi->pcie_gen_powersaving.min,
3519 pi->pcie_lane_powersaving.max);
3520 else
3521 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3522 pi->pcie_gen_powersaving.min,
3523 pi->pcie_lane_powersaving.min);
3524 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3525 pi->pcie_gen_performance.min,
3526 pi->pcie_lane_performance.min);
3527 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3528 pi->pcie_gen_powersaving.min,
3529 pi->pcie_lane_powersaving.max);
3530 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3531 pi->pcie_gen_performance.min,
3532 pi->pcie_lane_performance.max);
3533 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3534 pi->pcie_gen_powersaving.max,
3535 pi->pcie_lane_powersaving.max);
3536 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3537 pi->pcie_gen_performance.max,
3538 pi->pcie_lane_performance.max);
3539
3540 pi->dpm_table.pcie_speed_table.count = 6;
3541
3542 return 0;
3543}
3544
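/*
 * ci_setup_default_dpm_tables() rebuilds the driver-side DPM tables from the
 * powerplay dependency tables: sclk and mclk entries are de-duplicated
 * against the previous clock, vddc levels take their leakage parameter from
 * the CAC leakage table, and the vddci/mvdd levels come straight from their
 * mclk dependency tables.  The PCIe table is set up last.
 */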
3545static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
3546{
3547 struct ci_power_info *pi = ci_get_pi(adev);
3548 struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3549 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3550 struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
3551 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3552 struct amdgpu_cac_leakage_table *std_voltage_table =
3553 &adev->pm.dpm.dyn_state.cac_leakage_table;
3554 u32 i;
3555
3556 if (allowed_sclk_vddc_table == NULL)
3557 return -EINVAL;
3558 if (allowed_sclk_vddc_table->count < 1)
3559 return -EINVAL;
3560 if (allowed_mclk_table == NULL)
3561 return -EINVAL;
3562 if (allowed_mclk_table->count < 1)
3563 return -EINVAL;
3564
3565 memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3566
3567 ci_reset_single_dpm_table(adev,
3568 &pi->dpm_table.sclk_table,
3569 SMU7_MAX_LEVELS_GRAPHICS);
3570 ci_reset_single_dpm_table(adev,
3571 &pi->dpm_table.mclk_table,
3572 SMU7_MAX_LEVELS_MEMORY);
3573 ci_reset_single_dpm_table(adev,
3574 &pi->dpm_table.vddc_table,
3575 SMU7_MAX_LEVELS_VDDC);
3576 ci_reset_single_dpm_table(adev,
3577 &pi->dpm_table.vddci_table,
3578 SMU7_MAX_LEVELS_VDDCI);
3579 ci_reset_single_dpm_table(adev,
3580 &pi->dpm_table.mvdd_table,
3581 SMU7_MAX_LEVELS_MVDD);
3582
3583 pi->dpm_table.sclk_table.count = 0;
3584 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3585 if ((i == 0) ||
3586 (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3587 allowed_sclk_vddc_table->entries[i].clk)) {
3588 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3589 allowed_sclk_vddc_table->entries[i].clk;
3590 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3591 (i == 0) ? true : false;
3592 pi->dpm_table.sclk_table.count++;
3593 }
3594 }
3595
3596 pi->dpm_table.mclk_table.count = 0;
3597 for (i = 0; i < allowed_mclk_table->count; i++) {
3598 if ((i == 0) ||
3599 (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3600 allowed_mclk_table->entries[i].clk)) {
3601 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3602 allowed_mclk_table->entries[i].clk;
3603 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3604 (i == 0) ? true : false;
3605 pi->dpm_table.mclk_table.count++;
3606 }
3607 }
3608
3609 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3610 pi->dpm_table.vddc_table.dpm_levels[i].value =
3611 allowed_sclk_vddc_table->entries[i].v;
3612 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3613 std_voltage_table->entries[i].leakage;
3614 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3615 }
3616 pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3617
3618 allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3619 if (allowed_mclk_table) {
3620 for (i = 0; i < allowed_mclk_table->count; i++) {
3621 pi->dpm_table.vddci_table.dpm_levels[i].value =
3622 allowed_mclk_table->entries[i].v;
3623 pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3624 }
3625 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3626 }
3627
3628 allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3629 if (allowed_mclk_table) {
3630 for (i = 0; i < allowed_mclk_table->count; i++) {
3631 pi->dpm_table.mvdd_table.dpm_levels[i].value =
3632 allowed_mclk_table->entries[i].v;
3633 pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3634 }
3635 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3636 }
3637
3638 ci_setup_default_pcie_tables(adev);
3639
3640 return 0;
3641}
3642
3643static int ci_find_boot_level(struct ci_single_dpm_table *table,
3644 u32 value, u32 *boot_level)
3645{
3646 u32 i;
3647 int ret = -EINVAL;
3648
3649 for(i = 0; i < table->count; i++) {
3650 if (value == table->dpm_levels[i].value) {
3651 *boot_level = i;
3652 ret = 0;
3653 }
3654 }
3655
3656 return ret;
3657}
3658
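/*
 * ci_init_smc_table() is the top-level SMC state-table setup: it builds the
 * default DPM tables, populates the voltage, ULV, graphics, memory, link,
 * ACPI, VCE, ACP, SAMU and UVD levels, resolves the boot levels from the
 * VBIOS boot clocks, fills in the fixed intervals and thermal limits,
 * byte-swaps the CPU-endian fields, and uploads everything up to (but not
 * including) the three PID controller blocks to SMC SRAM.
 */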
3659static int ci_init_smc_table(struct amdgpu_device *adev)
3660{
3661 struct ci_power_info *pi = ci_get_pi(adev);
3662 struct ci_ulv_parm *ulv = &pi->ulv;
3663 struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
3664 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3665 int ret;
3666
3667 ret = ci_setup_default_dpm_tables(adev);
3668 if (ret)
3669 return ret;
3670
3671 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3672 ci_populate_smc_voltage_tables(adev, table);
3673
3674 ci_init_fps_limits(adev);
3675
3676 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3677 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3678
3679 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3680 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3681
3682 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
3683 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3684
3685 if (ulv->supported) {
3686 ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
3687 if (ret)
3688 return ret;
3689 WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3690 }
3691
3692 ret = ci_populate_all_graphic_levels(adev);
3693 if (ret)
3694 return ret;
3695
3696 ret = ci_populate_all_memory_levels(adev);
3697 if (ret)
3698 return ret;
3699
3700 ci_populate_smc_link_level(adev, table);
3701
3702 ret = ci_populate_smc_acpi_level(adev, table);
3703 if (ret)
3704 return ret;
3705
3706 ret = ci_populate_smc_vce_level(adev, table);
3707 if (ret)
3708 return ret;
3709
3710 ret = ci_populate_smc_acp_level(adev, table);
3711 if (ret)
3712 return ret;
3713
3714 ret = ci_populate_smc_samu_level(adev, table);
3715 if (ret)
3716 return ret;
3717
3718 ret = ci_do_program_memory_timing_parameters(adev);
3719 if (ret)
3720 return ret;
3721
3722 ret = ci_populate_smc_uvd_level(adev, table);
3723 if (ret)
3724 return ret;
3725
3726 table->UvdBootLevel = 0;
3727 table->VceBootLevel = 0;
3728 table->AcpBootLevel = 0;
3729 table->SamuBootLevel = 0;
3730 table->GraphicsBootLevel = 0;
3731 table->MemoryBootLevel = 0;
3732
3733 ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3734 pi->vbios_boot_state.sclk_bootup_value,
3735 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3736
3737 ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3738 pi->vbios_boot_state.mclk_bootup_value,
3739 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3740
3741 table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3742 table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3743 table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3744
3745 ci_populate_smc_initial_state(adev, amdgpu_boot_state);
3746
3747 ret = ci_populate_bapm_parameters_in_dpm_table(adev);
3748 if (ret)
3749 return ret;
3750
3751 table->UVDInterval = 1;
3752 table->VCEInterval = 1;
3753 table->ACPInterval = 1;
3754 table->SAMUInterval = 1;
3755 table->GraphicsVoltageChangeEnable = 1;
3756 table->GraphicsThermThrottleEnable = 1;
3757 table->GraphicsInterval = 1;
3758 table->VoltageInterval = 1;
3759 table->ThermalInterval = 1;
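/*
 * Thermal trip points are converted to the SMC's Q8.8 fixed-point format;
 * assuming CISLANDS_Q88_FORMAT_CONVERSION_UNIT is 256 and the limits are in
 * millidegrees, e.g. 120000 * 256 / 1000 = 30720 (i.e. 120 << 8).
 */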
3760 table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3761 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3762 table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3763 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3764 table->MemoryVoltageChangeEnable = 1;
3765 table->MemoryInterval = 1;
3766 table->VoltageResponseTime = 0;
3767 table->VddcVddciDelta = 4000;
3768 table->PhaseResponseTime = 0;
3769 table->MemoryThermThrottleEnable = 1;
3770 table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3771 table->PCIeGenInterval = 1;
3772 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3773 table->SVI2Enable = 1;
3774 else
3775 table->SVI2Enable = 0;
3776
3777 table->ThermGpio = 17;
3778 table->SclkStepSize = 0x4000;
3779
3780 table->SystemFlags = cpu_to_be32(table->SystemFlags);
3781 table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3782 table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3783 table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3784 table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3785 table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3786 table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3787 table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3788 table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3789 table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3790 table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3791 table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3792 table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3793 table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3794
3795 ret = amdgpu_ci_copy_bytes_to_smc(adev,
3796 pi->dpm_table_start +
3797 offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3798 (u8 *)&table->SystemFlags,
3799 sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3800 pi->sram_end);
3801 if (ret)
3802 return ret;
3803
3804 return 0;
3805}
3806
3807static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
3808 struct ci_single_dpm_table *dpm_table,
3809 u32 low_limit, u32 high_limit)
3810{
3811 u32 i;
3812
3813 for (i = 0; i < dpm_table->count; i++) {
3814 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3815 (dpm_table->dpm_levels[i].value > high_limit))
3816 dpm_table->dpm_levels[i].enabled = false;
3817 else
3818 dpm_table->dpm_levels[i].enabled = true;
3819 }
3820}
3821
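/*
 * ci_trim_pcie_dpm_states() first disables PCIe levels whose speed or lane
 * count falls outside the requested window, then disables any remaining
 * level that duplicates an earlier enabled speed/lane pair.
 */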
3822static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
3823 u32 speed_low, u32 lanes_low,
3824 u32 speed_high, u32 lanes_high)
3825{
3826 struct ci_power_info *pi = ci_get_pi(adev);
3827 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3828 u32 i, j;
3829
3830 for (i = 0; i < pcie_table->count; i++) {
3831 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3832 (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3833 (pcie_table->dpm_levels[i].value > speed_high) ||
3834 (pcie_table->dpm_levels[i].param1 > lanes_high))
3835 pcie_table->dpm_levels[i].enabled = false;
3836 else
3837 pcie_table->dpm_levels[i].enabled = true;
3838 }
3839
3840 for (i = 0; i < pcie_table->count; i++) {
3841 if (pcie_table->dpm_levels[i].enabled) {
3842 for (j = i + 1; j < pcie_table->count; j++) {
3843 if (pcie_table->dpm_levels[j].enabled) {
3844 if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3845 (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3846 pcie_table->dpm_levels[j].enabled = false;
3847 }
3848 }
3849 }
3850 }
3851}
3852
3853static int ci_trim_dpm_states(struct amdgpu_device *adev,
3854 struct amdgpu_ps *amdgpu_state)
3855{
3856 struct ci_ps *state = ci_get_ps(amdgpu_state);
3857 struct ci_power_info *pi = ci_get_pi(adev);
3858 u32 high_limit_count;
3859
3860 if (state->performance_level_count < 1)
3861 return -EINVAL;
3862
3863 if (state->performance_level_count == 1)
3864 high_limit_count = 0;
3865 else
3866 high_limit_count = 1;
3867
3868 ci_trim_single_dpm_states(adev,
3869 &pi->dpm_table.sclk_table,
3870 state->performance_levels[0].sclk,
3871 state->performance_levels[high_limit_count].sclk);
3872
3873 ci_trim_single_dpm_states(adev,
3874 &pi->dpm_table.mclk_table,
3875 state->performance_levels[0].mclk,
3876 state->performance_levels[high_limit_count].mclk);
3877
3878 ci_trim_pcie_dpm_states(adev,
3879 state->performance_levels[0].pcie_gen,
3880 state->performance_levels[0].pcie_lane,
3881 state->performance_levels[high_limit_count].pcie_gen,
3882 state->performance_levels[high_limit_count].pcie_lane);
3883
3884 return 0;
3885}
3886
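/*
 * ci_apply_disp_minimum_voltage_request() looks up the VDDC required by the
 * current display clock and asks the SMC (PPSMC_MSG_VddC_Request) for the
 * lowest entry in the sclk/VDDC dependency table that meets or exceeds it.
 */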
3887static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
3888{
3889 struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
3890 &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3891 struct amdgpu_clock_voltage_dependency_table *vddc_table =
3892 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3893 u32 requested_voltage = 0;
3894 u32 i;
3895
3896 if (disp_voltage_table == NULL)
3897 return -EINVAL;
3898 if (!disp_voltage_table->count)
3899 return -EINVAL;
3900
3901 for (i = 0; i < disp_voltage_table->count; i++) {
3902 if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3903 requested_voltage = disp_voltage_table->entries[i].v;
3904 }
3905
3906 for (i = 0; i < vddc_table->count; i++) {
3907 if (requested_voltage <= vddc_table->entries[i].v) {
3908 requested_voltage = vddc_table->entries[i].v;
3909 return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3910 PPSMC_MSG_VddC_Request,
3911 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3912 0 : -EINVAL;
3913 }
3914 }
3915
3916 return -EINVAL;
3917}
3918
3919static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
3920{
3921 struct ci_power_info *pi = ci_get_pi(adev);
3922 PPSMC_Result result;
3923
3924 ci_apply_disp_minimum_voltage_request(adev);
3925
3926 if (!pi->sclk_dpm_key_disabled) {
3927 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3928 result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3929 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3930 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3931 if (result != PPSMC_Result_OK)
3932 return -EINVAL;
3933 }
3934 }
3935
3936 if (!pi->mclk_dpm_key_disabled) {
3937 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3938 result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3939 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3940 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3941 if (result != PPSMC_Result_OK)
3942 return -EINVAL;
3943 }
3944 }
3945
3946#if 0
3947 if (!pi->pcie_dpm_key_disabled) {
3948 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3949 result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3950 PPSMC_MSG_PCIeDPM_SetEnabledMask,
3951 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3952 if (result != PPSMC_Result_OK)
3953 return -EINVAL;
3954 }
3955 }
3956#endif
3957
3958 return 0;
3959}
3960
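/*
 * ci_find_dpm_states_clocks_in_dpm_table() decides which tables need to be
 * re-uploaded: a target sclk or mclk that is not already present marks an
 * overdrive (OD) update, and a change in the active CRTC count forces an
 * mclk table update.
 */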
3961static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
3962 struct amdgpu_ps *amdgpu_state)
3963{
3964 struct ci_power_info *pi = ci_get_pi(adev);
3965 struct ci_ps *state = ci_get_ps(amdgpu_state);
3966 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3967 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3968 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3969 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3970 u32 i;
3971
3972 pi->need_update_smu7_dpm_table = 0;
3973
3974 for (i = 0; i < sclk_table->count; i++) {
3975 if (sclk == sclk_table->dpm_levels[i].value)
3976 break;
3977 }
3978
3979 if (i >= sclk_table->count) {
3980 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3981 } else {
3982 /* XXX check display min clock requirements */
3983 if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
3984 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3985 }
3986
3987 for (i = 0; i < mclk_table->count; i++) {
3988 if (mclk == mclk_table->dpm_levels[i].value)
3989 break;
3990 }
3991
3992 if (i >= mclk_table->count)
3993 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3994
3995 if (adev->pm.dpm.current_active_crtc_count !=
3996 adev->pm.dpm.new_active_crtc_count)
3997 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3998}
3999
4000static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
4001 struct amdgpu_ps *amdgpu_state)
4002{
4003 struct ci_power_info *pi = ci_get_pi(adev);
4004 struct ci_ps *state = ci_get_ps(amdgpu_state);
4005 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4006 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4007 struct ci_dpm_table *dpm_table = &pi->dpm_table;
4008 int ret;
4009
4010 if (!pi->need_update_smu7_dpm_table)
4011 return 0;
4012
4013 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
4014 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
4015
4016 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
4017 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
4018
4019 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
4020 ret = ci_populate_all_graphic_levels(adev);
4021 if (ret)
4022 return ret;
4023 }
4024
4025 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
4026 ret = ci_populate_all_memory_levels(adev);
4027 if (ret)
4028 return ret;
4029 }
4030
4031 return 0;
4032}
4033
4034static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
4035{
4036 struct ci_power_info *pi = ci_get_pi(adev);
4037 const struct amdgpu_clock_and_voltage_limits *max_limits;
4038 int i;
4039
4040 if (adev->pm.dpm.ac_power)
4041 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4042 else
4043 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4044
4045 if (enable) {
4046 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
4047
4048 for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4049 if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4050 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
4051
4052 if (!pi->caps_uvd_dpm)
4053 break;
4054 }
4055 }
4056
4057 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4058 PPSMC_MSG_UVDDPM_SetEnabledMask,
4059 pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
4060
4061 if (pi->last_mclk_dpm_enable_mask & 0x1) {
4062 pi->uvd_enabled = true;
4063 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4064 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4065 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4066 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4067 }
4068 } else {
4069 if (pi->last_mclk_dpm_enable_mask & 0x1) {
4070 pi->uvd_enabled = false;
4071 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
4072 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4073 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4074 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4075 }
4076 }
4077
4078 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4079 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
4080 0 : -EINVAL;
4081}
4082
4083static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
4084{
4085 struct ci_power_info *pi = ci_get_pi(adev);
4086 const struct amdgpu_clock_and_voltage_limits *max_limits;
4087 int i;
4088
4089 if (adev->pm.dpm.ac_power)
4090 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4091 else
4092 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4093
4094 if (enable) {
4095 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
4096 for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4097 if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4098 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
4099
4100 if (!pi->caps_vce_dpm)
4101 break;
4102 }
4103 }
4104
4105 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4106 PPSMC_MSG_VCEDPM_SetEnabledMask,
4107 pi->dpm_level_enable_mask.vce_dpm_enable_mask);
4108 }
4109
4110 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4111 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
4112 0 : -EINVAL;
4113}
4114
4115#if 0
4116static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
4117{
4118 struct ci_power_info *pi = ci_get_pi(adev);
4119 const struct amdgpu_clock_and_voltage_limits *max_limits;
4120 int i;
4121
4122 if (adev->pm.dpm.ac_power)
4123 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4124 else
4125 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4126
4127 if (enable) {
4128 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
4129 for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4130 if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4131 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
4132
4133 if (!pi->caps_samu_dpm)
4134 break;
4135 }
4136 }
4137
4138 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4139 PPSMC_MSG_SAMUDPM_SetEnabledMask,
4140 pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4141 }
4142 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4143 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4144 0 : -EINVAL;
4145}
4146
4147static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
4148{
4149 struct ci_power_info *pi = ci_get_pi(adev);
4150 const struct amdgpu_clock_and_voltage_limits *max_limits;
4151 int i;
4152
4153 if (adev->pm.dpm.ac_power)
4154 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4155 else
4156 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4157
4158 if (enable) {
4159 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4160 for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4161 if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4162 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4163
4164 if (!pi->caps_acp_dpm)
4165 break;
4166 }
4167 }
4168
4169 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4170 PPSMC_MSG_ACPDPM_SetEnabledMask,
4171 pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4172 }
4173
4174 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4175 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4176 0 : -EINVAL;
4177}
4178#endif
4179
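/*
 * ci_update_uvd_dpm() programs the UVD boot level into DPM_TABLE_475 when
 * the block is being un-gated (highest dependency-table entry unless UVD
 * DPM is supported) and then enables or disables UVD DPM accordingly.
 */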
4180static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
4181{
4182 struct ci_power_info *pi = ci_get_pi(adev);
4183 u32 tmp;
4184
4185 if (!gate) {
4186 if (pi->caps_uvd_dpm ||
4187 (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4188 pi->smc_state_table.UvdBootLevel = 0;
4189 else
4190 pi->smc_state_table.UvdBootLevel =
4191 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4192
4193 tmp = RREG32_SMC(ixDPM_TABLE_475);
4194 tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
4195 tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
4196 WREG32_SMC(ixDPM_TABLE_475, tmp);
4197 }
4198
4199 return ci_enable_uvd_dpm(adev, !gate);
4200}
4201
4202static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
4203{
4204 u8 i;
4205 u32 min_evclk = 30000; /* ??? */
4206 struct amdgpu_vce_clock_voltage_dependency_table *table =
4207 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4208
4209 for (i = 0; i < table->count; i++) {
4210 if (table->entries[i].evclk >= min_evclk)
4211 return i;
4212 }
4213
4214 return table->count - 1;
4215}
4216
4217static int ci_update_vce_dpm(struct amdgpu_device *adev,
4218 struct amdgpu_ps *amdgpu_new_state,
4219 struct amdgpu_ps *amdgpu_current_state)
4220{
4221 struct ci_power_info *pi = ci_get_pi(adev);
4222 int ret = 0;
4223 u32 tmp;
4224
4225 if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
4226 if (amdgpu_new_state->evclk) {
4227 /* turn the clocks on when encoding */
4228 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
4229 AMD_CG_STATE_UNGATE);
4230 if (ret)
4231 return ret;
4232
4233 pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
4234 tmp = RREG32_SMC(ixDPM_TABLE_475);
4235 tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
4236 tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
4237 WREG32_SMC(ixDPM_TABLE_475, tmp);
4238
4239 ret = ci_enable_vce_dpm(adev, true);
4240 } else {
4241 /* turn the clocks off when not encoding */
4242 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
4243 AMD_CG_STATE_GATE);
4244 if (ret)
4245 return ret;
4246
4247 ret = ci_enable_vce_dpm(adev, false);
4248 }
4249 }
4250 return ret;
4251}
4252
4253#if 0
4254static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
4255{
4256 return ci_enable_samu_dpm(adev, gate);
4257}
4258
4259static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
4260{
4261 struct ci_power_info *pi = ci_get_pi(adev);
4262 u32 tmp;
4263
4264 if (!gate) {
4265 pi->smc_state_table.AcpBootLevel = 0;
4266
4267 tmp = RREG32_SMC(ixDPM_TABLE_475);
4268 tmp &= ~AcpBootLevel_MASK;
4269 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4270 WREG32_SMC(ixDPM_TABLE_475, tmp);
4271 }
4272
4273 return ci_enable_acp_dpm(adev, !gate);
4274}
4275#endif
4276
4277static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
4278 struct amdgpu_ps *amdgpu_state)
4279{
4280 struct ci_power_info *pi = ci_get_pi(adev);
4281 int ret;
4282
4283 ret = ci_trim_dpm_states(adev, amdgpu_state);
4284 if (ret)
4285 return ret;
4286
4287 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4288 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4289 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4290 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4291 pi->last_mclk_dpm_enable_mask =
4292 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4293 if (pi->uvd_enabled) {
4294 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4295 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4296 }
4297 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4298 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4299
4300 return 0;
4301}
4302
4303static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
4304 u32 level_mask)
4305{
4306 u32 level = 0;
4307
4308 while ((level_mask & (1 << level)) == 0)
4309 level++;
4310
4311 return level;
4312}
4313
4314
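/*
 * ci_dpm_force_performance_level(): for FORCED_LEVEL_HIGH the highest
 * enabled sclk/mclk/pcie level is forced and TARGET_AND_CURRENT_PROFILE_INDEX
 * is polled until the SMC reports that index; for FORCED_LEVEL_LOW the
 * lowest enabled level of each table is forced instead; for
 * FORCED_LEVEL_AUTO the PCIe level is un-forced and the current enable
 * masks are re-uploaded.
 */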
4315static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
4316 enum amdgpu_dpm_forced_level level)
4317{
4318 struct ci_power_info *pi = ci_get_pi(adev);
4319 u32 tmp, levels, i;
4320 int ret;
4321
4322 if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
4323 if ((!pi->pcie_dpm_key_disabled) &&
4324 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4325 levels = 0;
4326 tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4327 while (tmp >>= 1)
4328 levels++;
4329 if (levels) {
4330 ret = ci_dpm_force_state_pcie(adev, level);
4331 if (ret)
4332 return ret;
4333 for (i = 0; i < adev->usec_timeout; i++) {
4334 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4335 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4336 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4337 if (tmp == levels)
4338 break;
4339 udelay(1);
4340 }
4341 }
4342 }
4343 if ((!pi->sclk_dpm_key_disabled) &&
4344 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4345 levels = 0;
4346 tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4347 while (tmp >>= 1)
4348 levels++;
4349 if (levels) {
4350 ret = ci_dpm_force_state_sclk(adev, levels);
4351 if (ret)
4352 return ret;
4353 for (i = 0; i < adev->usec_timeout; i++) {
4354 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4355 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4356 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4357 if (tmp == levels)
4358 break;
4359 udelay(1);
4360 }
4361 }
4362 }
4363 if ((!pi->mclk_dpm_key_disabled) &&
4364 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4365 levels = 0;
4366 tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4367 while (tmp >>= 1)
4368 levels++;
4369 if (levels) {
4370 ret = ci_dpm_force_state_mclk(adev, levels);
4371 if (ret)
4372 return ret;
4373 for (i = 0; i < adev->usec_timeout; i++) {
4374 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4375 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4376 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4377 if (tmp == levels)
4378 break;
4379 udelay(1);
4380 }
4381 }
4382 }
4383 if ((!pi->pcie_dpm_key_disabled) &&
4384 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4385 levels = 0;
4386 tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4387 while (tmp >>= 1)
4388 levels++;
4389 if (levels) {
4390 ret = ci_dpm_force_state_pcie(adev, level);
4391 if (ret)
4392 return ret;
4393 for (i = 0; i < adev->usec_timeout; i++) {
4394 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4395 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4396 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4397 if (tmp == levels)
4398 break;
4399 udelay(1);
4400 }
4401 }
4402 }
4403 } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
4404 if ((!pi->sclk_dpm_key_disabled) &&
4405 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4406 levels = ci_get_lowest_enabled_level(adev,
4407 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4408 ret = ci_dpm_force_state_sclk(adev, levels);
4409 if (ret)
4410 return ret;
4411 for (i = 0; i < adev->usec_timeout; i++) {
4412 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4413 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4414 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4415 if (tmp == levels)
4416 break;
4417 udelay(1);
4418 }
4419 }
4420 if ((!pi->mclk_dpm_key_disabled) &&
4421 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4422 levels = ci_get_lowest_enabled_level(adev,
4423 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4424 ret = ci_dpm_force_state_mclk(adev, levels);
4425 if (ret)
4426 return ret;
4427 for (i = 0; i < adev->usec_timeout; i++) {
4428 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4429 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4430 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4431 if (tmp == levels)
4432 break;
4433 udelay(1);
4434 }
4435 }
4436 if ((!pi->pcie_dpm_key_disabled) &&
4437 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4438 levels = ci_get_lowest_enabled_level(adev,
4439 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4440 ret = ci_dpm_force_state_pcie(adev, levels);
4441 if (ret)
4442 return ret;
4443 for (i = 0; i < adev->usec_timeout; i++) {
4444 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4445 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4446 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4447 if (tmp == levels)
4448 break;
4449 udelay(1);
4450 }
4451 }
4452 } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
4453 if (!pi->pcie_dpm_key_disabled) {
4454 PPSMC_Result smc_result;
4455
4456 smc_result = amdgpu_ci_send_msg_to_smc(adev,
4457 PPSMC_MSG_PCIeDPM_UnForceLevel);
4458 if (smc_result != PPSMC_Result_OK)
4459 return -EINVAL;
4460 }
4461 ret = ci_upload_dpm_level_enable_mask(adev);
4462 if (ret)
4463 return ret;
4464 }
4465
4466 adev->pm.dpm.forced_level = level;
4467
4468 return 0;
4469}
4470
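/*
 * ci_set_mc_special_registers() extends the MC register table: for each
 * MC_SEQ_MISC1 or MC_SEQ_RESERVE_M entry it appends the matching
 * MC_PMG_CMD_* shadow registers, deriving their per-level values from the
 * current register contents combined with the existing table data.
 */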
4471static int ci_set_mc_special_registers(struct amdgpu_device *adev,
4472 struct ci_mc_reg_table *table)
4473{
4474 u8 i, j, k;
4475 u32 temp_reg;
4476
4477 for (i = 0, j = table->last; i < table->last; i++) {
4478 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4479 return -EINVAL;
4480 switch(table->mc_reg_address[i].s1) {
4481 case mmMC_SEQ_MISC1:
4482 temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
4483 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
4484 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
4485 for (k = 0; k < table->num_entries; k++) {
4486 table->mc_reg_table_entry[k].mc_data[j] =
4487 ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4488 }
4489 j++;
4490 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4491 return -EINVAL;
4492
4493 temp_reg = RREG32(mmMC_PMG_CMD_MRS);
4494 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
4495 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
4496 for (k = 0; k < table->num_entries; k++) {
4497 table->mc_reg_table_entry[k].mc_data[j] =
4498 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4499 if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
4500 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4501 }
4502 j++;
4503 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4504 return -EINVAL;
4505
4506 if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
4507 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
4508 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
4509 for (k = 0; k < table->num_entries; k++) {
4510 table->mc_reg_table_entry[k].mc_data[j] =
4511 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4512 }
4513 j++;
4514 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4515 return -EINVAL;
4516 }
4517 break;
4518 case mmMC_SEQ_RESERVE_M:
4519 temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
4520 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
4521 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
4522 for (k = 0; k < table->num_entries; k++) {
4523 table->mc_reg_table_entry[k].mc_data[j] =
4524 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4525 }
4526 j++;
4527 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4528 return -EINVAL;
4529 break;
4530 default:
4531 break;
4532 }
4533
4534 }
4535
4536 table->last = j;
4537
4538 return 0;
4539}
4540
4541static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4542{
4543 bool result = true;
4544
4545 switch(in_reg) {
4546 case mmMC_SEQ_RAS_TIMING:
4547 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
4548 break;
4549 case mmMC_SEQ_DLL_STBY:
4550 *out_reg = mmMC_SEQ_DLL_STBY_LP;
4551 break;
4552 case mmMC_SEQ_G5PDX_CMD0:
4553 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
4554 break;
4555 case mmMC_SEQ_G5PDX_CMD1:
4556 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
4557 break;
4558 case mmMC_SEQ_G5PDX_CTRL:
4559 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
4560 break;
4561 case mmMC_SEQ_CAS_TIMING:
4562 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
4563 break;
4564 case mmMC_SEQ_MISC_TIMING:
4565 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
4566 break;
4567 case mmMC_SEQ_MISC_TIMING2:
4568 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
4569 break;
4570 case mmMC_SEQ_PMG_DVS_CMD:
4571 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
4572 break;
4573 case mmMC_SEQ_PMG_DVS_CTL:
4574 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
4575 break;
4576 case mmMC_SEQ_RD_CTL_D0:
4577 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
4578 break;
4579 case mmMC_SEQ_RD_CTL_D1:
4580 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
4581 break;
4582 case mmMC_SEQ_WR_CTL_D0:
4583 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
4584 break;
4585 case mmMC_SEQ_WR_CTL_D1:
4586 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
4587 break;
4588 case mmMC_PMG_CMD_EMRS:
4589 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
4590 break;
4591 case mmMC_PMG_CMD_MRS:
4592 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
4593 break;
4594 case mmMC_PMG_CMD_MRS1:
4595 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
4596 break;
4597 case mmMC_SEQ_PMG_TIMING:
4598 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
4599 break;
4600 case mmMC_PMG_CMD_MRS2:
4601 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
4602 break;
4603 case mmMC_SEQ_WR_CTL_2:
4604 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
4605 break;
4606 default:
4607 result = false;
4608 break;
4609 }
4610
4611 return result;
4612}
4613
4614static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4615{
4616 u8 i, j;
4617
4618 for (i = 0; i < table->last; i++) {
4619 for (j = 1; j < table->num_entries; j++) {
4620 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4621 table->mc_reg_table_entry[j].mc_data[i]) {
4622 table->valid_flag |= 1 << i;
4623 break;
4624 }
4625 }
4626 }
4627}
4628
4629static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4630{
4631 u32 i;
4632 u16 address;
4633
4634 for (i = 0; i < table->last; i++) {
4635 table->mc_reg_address[i].s0 =
4636 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4637 address : table->mc_reg_address[i].s1;
4638 }
4639}
4640
4641static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4642 struct ci_mc_reg_table *ci_table)
4643{
4644 u8 i, j;
4645
4646 if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4647 return -EINVAL;
4648 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4649 return -EINVAL;
4650
4651 for (i = 0; i < table->last; i++)
4652 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4653
4654 ci_table->last = table->last;
4655
4656 for (i = 0; i < table->num_entries; i++) {
4657 ci_table->mc_reg_table_entry[i].mclk_max =
4658 table->mc_reg_table_entry[i].mclk_max;
4659 for (j = 0; j < table->last; j++)
4660 ci_table->mc_reg_table_entry[i].mc_data[j] =
4661 table->mc_reg_table_entry[i].mc_data[j];
4662 }
4663 ci_table->num_entries = table->num_entries;
4664
4665 return 0;
4666}
4667
4668static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
4669 struct ci_mc_reg_table *table)
4670{
4671 u8 i, k;
4672 u32 tmp;
4673 bool patch;
4674
4675 tmp = RREG32(mmMC_SEQ_MISC0);
4676 patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
4677
4678 if (patch &&
4679 ((adev->pdev->device == 0x67B0) ||
4680 (adev->pdev->device == 0x67B1))) {
4681 for (i = 0; i < table->last; i++) {
4682 if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4683 return -EINVAL;
4684 switch (table->mc_reg_address[i].s1) {
4685 case mmMC_SEQ_MISC1:
4686 for (k = 0; k < table->num_entries; k++) {
4687 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4688 (table->mc_reg_table_entry[k].mclk_max == 137500))
4689 table->mc_reg_table_entry[k].mc_data[i] =
4690 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4691 0x00000007;
4692 }
4693 break;
4694 case mmMC_SEQ_WR_CTL_D0:
4695 for (k = 0; k < table->num_entries; k++) {
4696 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4697 (table->mc_reg_table_entry[k].mclk_max == 137500))
4698 table->mc_reg_table_entry[k].mc_data[i] =
4699 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4700 0x0000D0DD;
4701 }
4702 break;
4703 case mmMC_SEQ_WR_CTL_D1:
4704 for (k = 0; k < table->num_entries; k++) {
4705 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4706 (table->mc_reg_table_entry[k].mclk_max == 137500))
4707 table->mc_reg_table_entry[k].mc_data[i] =
4708 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4709 0x0000D0DD;
4710 }
4711 break;
4712 case mmMC_SEQ_WR_CTL_2:
4713 for (k = 0; k < table->num_entries; k++) {
4714 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4715 (table->mc_reg_table_entry[k].mclk_max == 137500))
4716 table->mc_reg_table_entry[k].mc_data[i] = 0;
4717 }
4718 break;
4719 case mmMC_SEQ_CAS_TIMING:
4720 for (k = 0; k < table->num_entries; k++) {
4721 if (table->mc_reg_table_entry[k].mclk_max == 125000)
4722 table->mc_reg_table_entry[k].mc_data[i] =
4723 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4724 0x000C0140;
4725 else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4726 table->mc_reg_table_entry[k].mc_data[i] =
4727 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4728 0x000C0150;
4729 }
4730 break;
4731 case mmMC_SEQ_MISC_TIMING:
4732 for (k = 0; k < table->num_entries; k++) {
4733 if (table->mc_reg_table_entry[k].mclk_max == 125000)
4734 table->mc_reg_table_entry[k].mc_data[i] =
4735 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4736 0x00000030;
4737 else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4738 table->mc_reg_table_entry[k].mc_data[i] =
4739 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4740 0x00000035;
4741 }
4742 break;
4743 default:
4744 break;
4745 }
4746 }
4747
4748 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4749 tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
4750 tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4751 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4752 WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
4753 }
4754
4755 return 0;
4756}
4757
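/*
 * ci_initialize_mc_reg_table() copies the live MC sequencer registers into
 * their _LP shadows, reads the VBIOS MC register table for the installed
 * memory module, converts it into the driver's ci_mc_reg_table, applies the
 * 0x67B0/0x67B1 register patching and the special-register expansion above,
 * and finally records which columns actually vary between entries.
 */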
4758static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
4759{
4760 struct ci_power_info *pi = ci_get_pi(adev);
4761 struct atom_mc_reg_table *table;
4762 struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4763 u8 module_index = ci_get_memory_module_index(adev);
4764 int ret;
4765
4766 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4767 if (!table)
4768 return -ENOMEM;
4769
4770 WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
4771 WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
4772 WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
4773 WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
4774 WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
4775 WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
4776 WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
4777 WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
4778 WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
4779 WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
4780 WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
4781 WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
4782 WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
4783 WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
4784 WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
4785 WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
4786 WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
4787 WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
4788 WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
4789 WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));
4790
4791 ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
4792 if (ret)
4793 goto init_mc_done;
4794
4795 ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4796 if (ret)
4797 goto init_mc_done;
4798
4799 ci_set_s0_mc_reg_index(ci_table);
4800
4801 ret = ci_register_patching_mc_seq(adev, ci_table);
4802 if (ret)
4803 goto init_mc_done;
4804
4805 ret = ci_set_mc_special_registers(adev, ci_table);
4806 if (ret)
4807 goto init_mc_done;
4808
4809 ci_set_valid_flag(ci_table);
4810
4811init_mc_done:
4812 kfree(table);
4813
4814 return ret;
4815}
4816
4817static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
4818 SMU7_Discrete_MCRegisters *mc_reg_table)
4819{
4820 struct ci_power_info *pi = ci_get_pi(adev);
4821 u32 i, j;
4822
4823 for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4824 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4825 if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4826 return -EINVAL;
4827 mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4828 mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4829 i++;
4830 }
4831 }
4832
4833 mc_reg_table->last = (u8)i;
4834
4835 return 0;
4836}
4837
4838static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4839 SMU7_Discrete_MCRegisterSet *data,
4840 u32 num_entries, u32 valid_flag)
4841{
4842 u32 i, j;
4843
4844 for (i = 0, j = 0; j < num_entries; j++) {
4845 if (valid_flag & (1 << j)) {
4846 data->value[i] = cpu_to_be32(entry->mc_data[j]);
4847 i++;
4848 }
4849 }
4850}
4851
4852static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
4853 const u32 memory_clock,
4854 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4855{
4856 struct ci_power_info *pi = ci_get_pi(adev);
4857 u32 i = 0;
4858
4859 for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
4860 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4861 break;
4862 }
4863
4864 if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4865 --i;
4866
4867 ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4868 mc_reg_table_data, pi->mc_reg_table.last,
4869 pi->mc_reg_table.valid_flag);
4870}
4871
4872static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
4873 SMU7_Discrete_MCRegisters *mc_reg_table)
4874{
4875 struct ci_power_info *pi = ci_get_pi(adev);
4876 u32 i;
4877
4878 for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4879 ci_convert_mc_reg_table_entry_to_smc(adev,
4880 pi->dpm_table.mclk_table.dpm_levels[i].value,
4881 &mc_reg_table->data[i]);
4882}
4883
4884static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
4885{
4886 struct ci_power_info *pi = ci_get_pi(adev);
4887 int ret;
4888
4889 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4890
4891 ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
4892 if (ret)
4893 return ret;
4894 ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4895
4896 return amdgpu_ci_copy_bytes_to_smc(adev,
4897 pi->mc_reg_table_start,
4898 (u8 *)&pi->smc_mc_reg_table,
4899 sizeof(SMU7_Discrete_MCRegisters),
4900 pi->sram_end);
4901}
4902
4903static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
4904{
4905 struct ci_power_info *pi = ci_get_pi(adev);
4906
4907 if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4908 return 0;
4909
4910 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4911
4912 ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4913
4914 return amdgpu_ci_copy_bytes_to_smc(adev,
4915 pi->mc_reg_table_start +
4916 offsetof(SMU7_Discrete_MCRegisters, data[0]),
4917 (u8 *)&pi->smc_mc_reg_table.data[0],
4918 sizeof(SMU7_Discrete_MCRegisterSet) *
4919 pi->dpm_table.mclk_table.count,
4920 pi->sram_end);
4921}
4922
4923static void ci_enable_voltage_control(struct amdgpu_device *adev)
4924{
4925 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
4926
4927 tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
4928 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
4929}
4930
4931static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
4932 struct amdgpu_ps *amdgpu_state)
4933{
4934 struct ci_ps *state = ci_get_ps(amdgpu_state);
4935 int i;
4936 u16 pcie_speed, max_speed = 0;
4937
4938 for (i = 0; i < state->performance_level_count; i++) {
4939 pcie_speed = state->performance_levels[i].pcie_gen;
4940 if (max_speed < pcie_speed)
4941 max_speed = pcie_speed;
4942 }
4943
4944 return max_speed;
4945}
4946
4947static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
4948{
4949 u32 speed_cntl = 0;
4950
4951 speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
4952 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
4953 speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
4954
4955 return (u16)speed_cntl;
4956}
4957
4958static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
4959{
4960 u32 link_width = 0;
4961
4962 link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
4963 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
4964 link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
4965
4966 switch (link_width) {
4967 case 1:
4968 return 1;
4969 case 2:
4970 return 2;
4971 case 3:
4972 return 4;
4973 case 4:
4974 return 8;
4975 case 0:
4976 case 6:
4977 default:
4978 return 16;
4979 }
4980}
4981
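/*
 * The two helpers below implement the ACPI PSPP handshake around a state
 * change: before the switch the driver requests a higher PCIe gen through
 * amdgpu_acpi_pcie_performance_request(), falling back one generation if the
 * request fails; after the switch it sends the request matching the new,
 * lower target link speed when a downgrade was deferred.
 */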
4982static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
4983 struct amdgpu_ps *amdgpu_new_state,
4984 struct amdgpu_ps *amdgpu_current_state)
4985{
4986 struct ci_power_info *pi = ci_get_pi(adev);
4987 enum amdgpu_pcie_gen target_link_speed =
4988 ci_get_maximum_link_speed(adev, amdgpu_new_state);
4989 enum amdgpu_pcie_gen current_link_speed;
4990
4991 if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
4992 current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
4993 else
4994 current_link_speed = pi->force_pcie_gen;
4995
4996 pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
4997 pi->pspp_notify_required = false;
4998 if (target_link_speed > current_link_speed) {
4999 switch (target_link_speed) {
5000#ifdef CONFIG_ACPI
5001 case AMDGPU_PCIE_GEN3:
5002 if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
5003 break;
5004 pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
5005 if (current_link_speed == AMDGPU_PCIE_GEN2)
5006 break;
5007 case AMDGPU_PCIE_GEN2:
5008 if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
5009 break;
5010#endif
5011 default:
5012 pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
5013 break;
5014 }
5015 } else {
5016 if (target_link_speed < current_link_speed)
5017 pi->pspp_notify_required = true;
5018 }
5019}
5020
5021static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
5022 struct amdgpu_ps *amdgpu_new_state,
5023 struct amdgpu_ps *amdgpu_current_state)
5024{
5025 struct ci_power_info *pi = ci_get_pi(adev);
5026 enum amdgpu_pcie_gen target_link_speed =
5027 ci_get_maximum_link_speed(adev, amdgpu_new_state);
5028 u8 request;
5029
5030 if (pi->pspp_notify_required) {
5031 if (target_link_speed == AMDGPU_PCIE_GEN3)
5032 request = PCIE_PERF_REQ_PECI_GEN3;
5033 else if (target_link_speed == AMDGPU_PCIE_GEN2)
5034 request = PCIE_PERF_REQ_PECI_GEN2;
5035 else
5036 request = PCIE_PERF_REQ_PECI_GEN1;
5037
5038 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
5039 (ci_get_current_pcie_speed(adev) > 0))
5040 return;
5041
5042#ifdef CONFIG_ACPI
5043 amdgpu_acpi_pcie_performance_request(adev, request, false);
5044#endif
5045 }
5046}
5047
5048static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
5049{
5050 struct ci_power_info *pi = ci_get_pi(adev);
5051 struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
5052 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
5053 struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
5054 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
5055 struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
5056 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
5057
5058 if (allowed_sclk_vddc_table == NULL)
5059 return -EINVAL;
5060 if (allowed_sclk_vddc_table->count < 1)
5061 return -EINVAL;
5062 if (allowed_mclk_vddc_table == NULL)
5063 return -EINVAL;
5064 if (allowed_mclk_vddc_table->count < 1)
5065 return -EINVAL;
5066 if (allowed_mclk_vddci_table == NULL)
5067 return -EINVAL;
5068 if (allowed_mclk_vddci_table->count < 1)
5069 return -EINVAL;
5070
5071 pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
5072 pi->max_vddc_in_pp_table =
5073 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5074
5075 pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
5076 pi->max_vddci_in_pp_table =
5077 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5078
5079 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
5080 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
5081 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
5082 allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
5083 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
5084 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5085 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
5086 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5087
5088 return 0;
5089}
5090
5091static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
5092{
5093 struct ci_power_info *pi = ci_get_pi(adev);
5094 struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
5095 u32 leakage_index;
5096
5097 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5098 if (leakage_table->leakage_id[leakage_index] == *vddc) {
5099 *vddc = leakage_table->actual_voltage[leakage_index];
5100 break;
5101 }
5102 }
5103}
5104
5105static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
5106{
5107 struct ci_power_info *pi = ci_get_pi(adev);
5108 struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
5109 u32 leakage_index;
5110
5111 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5112 if (leakage_table->leakage_id[leakage_index] == *vddci) {
5113 *vddci = leakage_table->actual_voltage[leakage_index];
5114 break;
5115 }
5116 }
5117}
5118
5119static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5120 struct amdgpu_clock_voltage_dependency_table *table)
5121{
5122 u32 i;
5123
5124 if (table) {
5125 for (i = 0; i < table->count; i++)
5126 ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5127 }
5128}
5129
5130static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
5131 struct amdgpu_clock_voltage_dependency_table *table)
5132{
5133 u32 i;
5134
5135 if (table) {
5136 for (i = 0; i < table->count; i++)
5137 ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
5138 }
5139}
5140
5141static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5142 struct amdgpu_vce_clock_voltage_dependency_table *table)
5143{
5144 u32 i;
5145
5146 if (table) {
5147 for (i = 0; i < table->count; i++)
5148 ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5149 }
5150}
5151
5152static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5153 struct amdgpu_uvd_clock_voltage_dependency_table *table)
5154{
5155 u32 i;
5156
5157 if (table) {
5158 for (i = 0; i < table->count; i++)
5159 ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5160 }
5161}
5162
5163static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
5164 struct amdgpu_phase_shedding_limits_table *table)
5165{
5166 u32 i;
5167
5168 if (table) {
5169 for (i = 0; i < table->count; i++)
5170 ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
5171 }
5172}
5173
5174static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
5175 struct amdgpu_clock_and_voltage_limits *table)
5176{
5177 if (table) {
5178 ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
5179 ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
5180 }
5181}
5182
5183static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
5184 struct amdgpu_cac_leakage_table *table)
5185{
5186 u32 i;
5187
5188 if (table) {
5189 for (i = 0; i < table->count; i++)
5190 ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
5191 }
5192}
5193
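/* Run the leakage substitution over every clock/voltage dependency table,
 * the phase shedding limits, the AC/DC clock-voltage limits and the CAC
 * leakage table parsed from the vbios. */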
5194static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
5195{
5196
5197 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5198 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5199 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5200 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5201 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5202 &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
5203 ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
5204 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5205 ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
5206 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
5207 ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
5208 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
5209 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5210 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
5211 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5212 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
5213 ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
5214 &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
5215 ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5216 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
5217 ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5218 &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
5219 ci_patch_cac_leakage_table_with_vddc_leakage(adev,
5220 &adev->pm.dpm.dyn_state.cac_leakage_table);
5221
5222}
5223
5224static void ci_update_current_ps(struct amdgpu_device *adev,
5225 struct amdgpu_ps *rps)
5226{
5227 struct ci_ps *new_ps = ci_get_ps(rps);
5228 struct ci_power_info *pi = ci_get_pi(adev);
5229
5230 pi->current_rps = *rps;
5231 pi->current_ps = *new_ps;
5232 pi->current_rps.ps_priv = &pi->current_ps;
5233}
5234
5235static void ci_update_requested_ps(struct amdgpu_device *adev,
5236 struct amdgpu_ps *rps)
5237{
5238 struct ci_ps *new_ps = ci_get_ps(rps);
5239 struct ci_power_info *pi = ci_get_pi(adev);
5240
5241 pi->requested_rps = *rps;
5242 pi->requested_ps = *new_ps;
5243 pi->requested_rps.ps_priv = &pi->requested_ps;
5244}
5245
5246static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
5247{
5248 struct ci_power_info *pi = ci_get_pi(adev);
5249 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
5250 struct amdgpu_ps *new_ps = &requested_ps;
5251
5252 ci_update_requested_ps(adev, new_ps);
5253
5254 ci_apply_state_adjust_rules(adev, &pi->requested_rps);
5255
5256 return 0;
5257}
5258
5259static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
5260{
5261 struct ci_power_info *pi = ci_get_pi(adev);
5262 struct amdgpu_ps *new_ps = &pi->requested_rps;
5263
5264 ci_update_current_ps(adev, new_ps);
5265}
5266
5267
5268static void ci_dpm_setup_asic(struct amdgpu_device *adev)
5269{
5270 ci_read_clock_registers(adev);
5271 ci_enable_acpi_power_management(adev);
5272 ci_init_sclk_t(adev);
5273}
5274
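/* Full DPM bring-up: build the voltage and MC register tables, upload the
 * SMC firmware and state tables, start the SMC, then enable the individual
 * features (ULV, deep sleep, DIDT, CAC, power containment, thermal DPM)
 * and arm the thermal interrupts. */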
5275static int ci_dpm_enable(struct amdgpu_device *adev)
5276{
5277 struct ci_power_info *pi = ci_get_pi(adev);
5278 struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5279 int ret;
5280
5281 if (amdgpu_ci_is_smc_running(adev))
5282 return -EINVAL;
5283 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
5284 ci_enable_voltage_control(adev);
5285 ret = ci_construct_voltage_tables(adev);
5286 if (ret) {
5287 DRM_ERROR("ci_construct_voltage_tables failed\n");
5288 return ret;
5289 }
5290 }
5291 if (pi->caps_dynamic_ac_timing) {
5292 ret = ci_initialize_mc_reg_table(adev);
5293 if (ret)
5294 pi->caps_dynamic_ac_timing = false;
5295 }
5296 if (pi->dynamic_ss)
5297 ci_enable_spread_spectrum(adev, true);
5298 if (pi->thermal_protection)
5299 ci_enable_thermal_protection(adev, true);
5300 ci_program_sstp(adev);
5301 ci_enable_display_gap(adev);
5302 ci_program_vc(adev);
5303 ret = ci_upload_firmware(adev);
5304 if (ret) {
5305 DRM_ERROR("ci_upload_firmware failed\n");
5306 return ret;
5307 }
5308 ret = ci_process_firmware_header(adev);
5309 if (ret) {
5310 DRM_ERROR("ci_process_firmware_header failed\n");
5311 return ret;
5312 }
5313 ret = ci_initial_switch_from_arb_f0_to_f1(adev);
5314 if (ret) {
5315 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
5316 return ret;
5317 }
5318 ret = ci_init_smc_table(adev);
5319 if (ret) {
5320 DRM_ERROR("ci_init_smc_table failed\n");
5321 return ret;
5322 }
5323 ret = ci_init_arb_table_index(adev);
5324 if (ret) {
5325 DRM_ERROR("ci_init_arb_table_index failed\n");
5326 return ret;
5327 }
5328 if (pi->caps_dynamic_ac_timing) {
5329 ret = ci_populate_initial_mc_reg_table(adev);
5330 if (ret) {
5331 DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
5332 return ret;
5333 }
5334 }
5335 ret = ci_populate_pm_base(adev);
5336 if (ret) {
5337 DRM_ERROR("ci_populate_pm_base failed\n");
5338 return ret;
5339 }
5340 ci_dpm_start_smc(adev);
5341 ci_enable_vr_hot_gpio_interrupt(adev);
5342 ret = ci_notify_smc_display_change(adev, false);
5343 if (ret) {
5344 DRM_ERROR("ci_notify_smc_display_change failed\n");
5345 return ret;
5346 }
5347 ci_enable_sclk_control(adev, true);
5348 ret = ci_enable_ulv(adev, true);
5349 if (ret) {
5350 DRM_ERROR("ci_enable_ulv failed\n");
5351 return ret;
5352 }
5353 ret = ci_enable_ds_master_switch(adev, true);
5354 if (ret) {
5355 DRM_ERROR("ci_enable_ds_master_switch failed\n");
5356 return ret;
5357 }
5358 ret = ci_start_dpm(adev);
5359 if (ret) {
5360 DRM_ERROR("ci_start_dpm failed\n");
5361 return ret;
5362 }
5363 ret = ci_enable_didt(adev, true);
5364 if (ret) {
5365 DRM_ERROR("ci_enable_didt failed\n");
5366 return ret;
5367 }
5368 ret = ci_enable_smc_cac(adev, true);
5369 if (ret) {
5370 DRM_ERROR("ci_enable_smc_cac failed\n");
5371 return ret;
5372 }
5373 ret = ci_enable_power_containment(adev, true);
5374 if (ret) {
5375 DRM_ERROR("ci_enable_power_containment failed\n");
5376 return ret;
5377 }
5378
5379 ret = ci_power_control_set_level(adev);
5380 if (ret) {
5381 DRM_ERROR("ci_power_control_set_level failed\n");
5382 return ret;
5383 }
5384
5385 ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5386
5387 ret = ci_enable_thermal_based_sclk_dpm(adev, true);
5388 if (ret) {
5389 DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
5390 return ret;
5391 }
5392
5393 ci_thermal_start_thermal_controller(adev);
5394
5395 ci_update_current_ps(adev, boot_ps);
5396
5397 if (adev->irq.installed &&
5398 amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
5399#if 0
5400 PPSMC_Result result;
5401#endif
5402 ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
5403 CISLANDS_TEMP_RANGE_MAX);
5404 if (ret) {
5405 DRM_ERROR("ci_thermal_set_temperature_range failed\n");
5406 return ret;
5407 }
5408 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
5409 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
5410 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
5411 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
5412
5413#if 0
5414 result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
5415
5416 if (result != PPSMC_Result_OK)
5417 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
5418#endif
5419 }
5420
5421 return 0;
5422}
5423
5424static void ci_dpm_disable(struct amdgpu_device *adev)
5425{
5426 struct ci_power_info *pi = ci_get_pi(adev);
5427 struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5428
5429 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5430 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
5431 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5432 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
5433
5434 ci_dpm_powergate_uvd(adev, false);
5435
5436 if (!amdgpu_ci_is_smc_running(adev))
5437 return;
5438
5439 ci_thermal_stop_thermal_controller(adev);
5440
5441 if (pi->thermal_protection)
5442 ci_enable_thermal_protection(adev, false);
5443 ci_enable_power_containment(adev, false);
5444 ci_enable_smc_cac(adev, false);
5445 ci_enable_didt(adev, false);
5446 ci_enable_spread_spectrum(adev, false);
5447 ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5448 ci_stop_dpm(adev);
5449 ci_enable_ds_master_switch(adev, false);
5450 ci_enable_ulv(adev, false);
5451 ci_clear_vc(adev);
5452 ci_reset_to_default(adev);
5453 ci_dpm_stop_smc(adev);
5454 ci_force_switch_to_arb_f0(adev);
5455 ci_enable_thermal_based_sclk_dpm(adev, false);
5456
5457 ci_update_current_ps(adev, boot_ps);
5458}
5459
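/* Transition to the requested power state: freeze SCLK/MCLK DPM, upload the
 * new DPM levels, enable mask and MC register values, then unfreeze and
 * notify any PCIe link speed change. */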
5460static int ci_dpm_set_power_state(struct amdgpu_device *adev)
5461{
5462 struct ci_power_info *pi = ci_get_pi(adev);
5463 struct amdgpu_ps *new_ps = &pi->requested_rps;
5464 struct amdgpu_ps *old_ps = &pi->current_rps;
5465 int ret;
5466
5467 ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
5468 if (pi->pcie_performance_request)
5469 ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
5470 ret = ci_freeze_sclk_mclk_dpm(adev);
5471 if (ret) {
5472 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
5473 return ret;
5474 }
5475 ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
5476 if (ret) {
5477 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
5478 return ret;
5479 }
5480 ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
5481 if (ret) {
5482 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
5483 return ret;
5484 }
5485
5486 ret = ci_update_vce_dpm(adev, new_ps, old_ps);
5487 if (ret) {
5488 DRM_ERROR("ci_update_vce_dpm failed\n");
5489 return ret;
5490 }
5491
5492 ret = ci_update_sclk_t(adev);
5493 if (ret) {
5494 DRM_ERROR("ci_update_sclk_t failed\n");
5495 return ret;
5496 }
5497 if (pi->caps_dynamic_ac_timing) {
5498 ret = ci_update_and_upload_mc_reg_table(adev);
5499 if (ret) {
5500 DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
5501 return ret;
5502 }
5503 }
5504 ret = ci_program_memory_timing_parameters(adev);
5505 if (ret) {
5506 DRM_ERROR("ci_program_memory_timing_parameters failed\n");
5507 return ret;
5508 }
5509 ret = ci_unfreeze_sclk_mclk_dpm(adev);
5510 if (ret) {
5511 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
5512 return ret;
5513 }
5514 ret = ci_upload_dpm_level_enable_mask(adev);
5515 if (ret) {
5516 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
5517 return ret;
5518 }
5519 if (pi->pcie_performance_request)
5520 ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
5521
5522 return 0;
5523}
5524
5525#if 0
5526static void ci_dpm_reset_asic(struct amdgpu_device *adev)
5527{
5528 ci_set_boot_state(adev);
5529}
5530#endif
5531
5532static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
5533{
5534 ci_program_display_gap(adev);
5535}
5536
5537union power_info {
5538 struct _ATOM_POWERPLAY_INFO info;
5539 struct _ATOM_POWERPLAY_INFO_V2 info_2;
5540 struct _ATOM_POWERPLAY_INFO_V3 info_3;
5541 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
5542 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
5543 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
5544};
5545
5546union pplib_clock_info {
5547 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
5548 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
5549 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
5550 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
5551 struct _ATOM_PPLIB_SI_CLOCK_INFO si;
5552 struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
5553};
5554
5555union pplib_power_state {
5556 struct _ATOM_PPLIB_STATE v1;
5557 struct _ATOM_PPLIB_STATE_V2 v2;
5558};
5559
5560static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
5561 struct amdgpu_ps *rps,
5562 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5563 u8 table_rev)
5564{
5565 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5566 rps->class = le16_to_cpu(non_clock_info->usClassification);
5567 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5568
5569 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
5570 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5571 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5572 } else {
5573 rps->vclk = 0;
5574 rps->dclk = 0;
5575 }
5576
5577 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5578 adev->pm.dpm.boot_ps = rps;
5579 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5580 adev->pm.dpm.uvd_ps = rps;
5581}
5582
5583static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
5584 struct amdgpu_ps *rps, int index,
5585 union pplib_clock_info *clock_info)
5586{
5587 struct ci_power_info *pi = ci_get_pi(adev);
5588 struct ci_ps *ps = ci_get_ps(rps);
5589 struct ci_pl *pl = &ps->performance_levels[index];
5590
5591 ps->performance_level_count = index + 1;
5592
5593 pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5594 pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5595 pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5596 pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5597
5598 pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
5599 pi->sys_pcie_mask,
5600 pi->vbios_boot_state.pcie_gen_bootup_value,
5601 clock_info->ci.ucPCIEGen);
5602 pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
5603 pi->vbios_boot_state.pcie_lane_bootup_value,
5604 le16_to_cpu(clock_info->ci.usPCIELane));
5605
5606 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
5607 pi->acpi_pcie_gen = pl->pcie_gen;
5608 }
5609
5610 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5611 pi->ulv.supported = true;
5612 pi->ulv.pl = *pl;
5613 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5614 }
5615
5616 /* patch up boot state */
5617 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5618 pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5619 pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5620 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5621 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5622 }
5623
5624 switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5625 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5626 pi->use_pcie_powersaving_levels = true;
5627 if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5628 pi->pcie_gen_powersaving.max = pl->pcie_gen;
5629 if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5630 pi->pcie_gen_powersaving.min = pl->pcie_gen;
5631 if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5632 pi->pcie_lane_powersaving.max = pl->pcie_lane;
5633 if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5634 pi->pcie_lane_powersaving.min = pl->pcie_lane;
5635 break;
5636 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5637 pi->use_pcie_performance_levels = true;
5638 if (pi->pcie_gen_performance.max < pl->pcie_gen)
5639 pi->pcie_gen_performance.max = pl->pcie_gen;
5640 if (pi->pcie_gen_performance.min > pl->pcie_gen)
5641 pi->pcie_gen_performance.min = pl->pcie_gen;
5642 if (pi->pcie_lane_performance.max < pl->pcie_lane)
5643 pi->pcie_lane_performance.max = pl->pcie_lane;
5644 if (pi->pcie_lane_performance.min > pl->pcie_lane)
5645 pi->pcie_lane_performance.min = pl->pcie_lane;
5646 break;
5647 default:
5648 break;
5649 }
5650}
5651
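/* Parse the PPLib power states from the vbios PowerPlayInfo table into
 * amdgpu_ps entries, one ci_ps with per-level clock info per state. */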
5652static int ci_parse_power_table(struct amdgpu_device *adev)
5653{
5654 struct amdgpu_mode_info *mode_info = &adev->mode_info;
5655 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5656 union pplib_power_state *power_state;
5657 int i, j, k, non_clock_array_index, clock_array_index;
5658 union pplib_clock_info *clock_info;
5659 struct _StateArray *state_array;
5660 struct _ClockInfoArray *clock_info_array;
5661 struct _NonClockInfoArray *non_clock_info_array;
5662 union power_info *power_info;
5663 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5664 u16 data_offset;
5665 u8 frev, crev;
5666 u8 *power_state_offset;
5667 struct ci_ps *ps;
5668
5669 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5670 &frev, &crev, &data_offset))
5671 return -EINVAL;
5672 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5673
5674 amdgpu_add_thermal_controller(adev);
5675
5676 state_array = (struct _StateArray *)
5677 (mode_info->atom_context->bios + data_offset +
5678 le16_to_cpu(power_info->pplib.usStateArrayOffset));
5679 clock_info_array = (struct _ClockInfoArray *)
5680 (mode_info->atom_context->bios + data_offset +
5681 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5682 non_clock_info_array = (struct _NonClockInfoArray *)
5683 (mode_info->atom_context->bios + data_offset +
5684 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5685
5686 adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
5687 state_array->ucNumEntries, GFP_KERNEL);
5688 if (!adev->pm.dpm.ps)
5689 return -ENOMEM;
5690 power_state_offset = (u8 *)state_array->states;
5691 for (i = 0; i < state_array->ucNumEntries; i++) {
5692 u8 *idx;
5693 power_state = (union pplib_power_state *)power_state_offset;
5694 non_clock_array_index = power_state->v2.nonClockInfoIndex;
5695 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5696 &non_clock_info_array->nonClockInfo[non_clock_array_index];
5697 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
5698 if (ps == NULL) {
5699 kfree(adev->pm.dpm.ps);
5700 return -ENOMEM;
5701 }
5702 adev->pm.dpm.ps[i].ps_priv = ps;
5703 ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
5704 non_clock_info,
5705 non_clock_info_array->ucEntrySize);
5706 k = 0;
5707 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5708 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5709 clock_array_index = idx[j];
5710 if (clock_array_index >= clock_info_array->ucNumEntries)
5711 continue;
5712 if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5713 break;
5714 clock_info = (union pplib_clock_info *)
5715 ((u8 *)&clock_info_array->clockInfo[0] +
5716 (clock_array_index * clock_info_array->ucEntrySize));
5717 ci_parse_pplib_clock_info(adev,
5718 &adev->pm.dpm.ps[i], k,
5719 clock_info);
5720 k++;
5721 }
5722 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5723 }
5724 adev->pm.dpm.num_ps = state_array->ucNumEntries;
5725
5726 /* fill in the vce power states */
5727 for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
5728 u32 sclk, mclk;
5729 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
5730 clock_info = (union pplib_clock_info *)
5731 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5732 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5733 sclk |= clock_info->ci.ucEngineClockHigh << 16;
5734 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5735 mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5736 adev->pm.dpm.vce_states[i].sclk = sclk;
5737 adev->pm.dpm.vce_states[i].mclk = mclk;
5738 }
5739
5740 return 0;
5741}
5742
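/* Read the bootup voltages, clocks and PCIe settings from the vbios
 * FirmwareInfo table so the boot state can be reconstructed later. */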
5743static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
5744 struct ci_vbios_boot_state *boot_state)
5745{
5746 struct amdgpu_mode_info *mode_info = &adev->mode_info;
5747 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5748 ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5749 u8 frev, crev;
5750 u16 data_offset;
5751
5752 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5753 &frev, &crev, &data_offset)) {
5754 firmware_info =
5755 (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5756 data_offset);
5757 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5758 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5759 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5760 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
5761 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
5762 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5763 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5764
5765 return 0;
5766 }
5767 return -EINVAL;
5768}
5769
5770static void ci_dpm_fini(struct amdgpu_device *adev)
5771{
5772 int i;
5773
5774 for (i = 0; i < adev->pm.dpm.num_ps; i++) {
5775 kfree(adev->pm.dpm.ps[i].ps_priv);
5776 }
5777 kfree(adev->pm.dpm.ps);
5778 kfree(adev->pm.dpm.priv);
5779 kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5780 amdgpu_free_extended_power_table(adev);
5781}
5782
5783/**
5784 * ci_dpm_init_microcode - load ucode images from disk
5785 *
5786 * @adev: amdgpu_device pointer
5787 *
5788 * Use the firmware interface to load the ucode images into
5789 * the driver (not loaded into hw).
5790 * Returns 0 on success, error on failure.
5791 */
5792static int ci_dpm_init_microcode(struct amdgpu_device *adev)
5793{
5794 const char *chip_name;
5795 char fw_name[30];
5796 int err;
5797
5798 DRM_DEBUG("\n");
5799
5800 switch (adev->asic_type) {
5801 case CHIP_BONAIRE:
5802 chip_name = "bonaire";
5803 break;
5804 case CHIP_HAWAII:
5805 chip_name = "hawaii";
5806 break;
5807 case CHIP_KAVERI:
5808 case CHIP_KABINI:
5809 default: BUG();
5810 }
5811
5812 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
5813 err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
5814 if (err)
5815 goto out;
5816 err = amdgpu_ucode_validate(adev->pm.fw);
5817
5818out:
5819 if (err) {
5820 printk(KERN_ERR
5821 "cik_smc: Failed to load firmware \"%s\"\n",
5822 fw_name);
5823 release_firmware(adev->pm.fw);
5824 adev->pm.fw = NULL;
5825 }
5826 return err;
5827}
5828
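/* One-time DPM software setup: allocate the power info, parse the vbios
 * power tables, fill in default thresholds and thermal limits, and detect
 * which voltage rails are controllable (GPIO vs SVID2). */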
5829static int ci_dpm_init(struct amdgpu_device *adev)
5830{
5831 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5832 SMU7_Discrete_DpmTable *dpm_table;
5833 struct amdgpu_gpio_rec gpio;
5834 u16 data_offset, size;
5835 u8 frev, crev;
5836 struct ci_power_info *pi;
5837 int ret;
5838 u32 mask;
5839
5840 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5841 if (pi == NULL)
5842 return -ENOMEM;
5843 adev->pm.dpm.priv = pi;
5844
5845 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
5846 if (ret)
5847 pi->sys_pcie_mask = 0;
5848 else
5849 pi->sys_pcie_mask = mask;
5850 pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5851
5852 pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
5853 pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
5854 pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
5855 pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;
5856
5857 pi->pcie_lane_performance.max = 0;
5858 pi->pcie_lane_performance.min = 16;
5859 pi->pcie_lane_powersaving.max = 0;
5860 pi->pcie_lane_powersaving.min = 16;
5861
5862 ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
5863 if (ret) {
5864 ci_dpm_fini(adev);
5865 return ret;
5866 }
5867
5868 ret = amdgpu_get_platform_caps(adev);
5869 if (ret) {
5870 ci_dpm_fini(adev);
5871 return ret;
5872 }
5873
5874 ret = amdgpu_parse_extended_power_table(adev);
5875 if (ret) {
5876 ci_dpm_fini(adev);
5877 return ret;
5878 }
5879
5880 ret = ci_parse_power_table(adev);
5881 if (ret) {
5882 ci_dpm_fini(adev);
5883 return ret;
5884 }
5885
5886 pi->dll_default_on = false;
5887 pi->sram_end = SMC_RAM_END;
5888
5889 pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5890 pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5891 pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5892 pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5893 pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5894 pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5895 pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5896 pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5897
5898 pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5899
5900 pi->sclk_dpm_key_disabled = 0;
5901 pi->mclk_dpm_key_disabled = 0;
5902 pi->pcie_dpm_key_disabled = 0;
5903 pi->thermal_sclk_dpm_enabled = 0;
5904
5905 pi->caps_sclk_ds = true;
5906
5907 pi->mclk_strobe_mode_threshold = 40000;
5908 pi->mclk_stutter_mode_threshold = 40000;
5909 pi->mclk_edc_enable_threshold = 40000;
5910 pi->mclk_edc_wr_enable_threshold = 40000;
5911
5912 ci_initialize_powertune_defaults(adev);
5913
5914 pi->caps_fps = false;
5915
5916 pi->caps_sclk_throttle_low_notification = false;
5917
5918 pi->caps_uvd_dpm = true;
5919 pi->caps_vce_dpm = true;
5920
5921 ci_get_leakage_voltages(adev);
5922 ci_patch_dependency_tables_with_leakage(adev);
5923 ci_set_private_data_variables_based_on_pptable(adev);
5924
5925 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5926 kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
5927 if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5928 ci_dpm_fini(adev);
5929 return -ENOMEM;
5930 }
5931 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5932 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5933 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5934 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5935 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5936 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5937 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5938 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5939 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5940
5941 adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5942 adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5943 adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5944
5945 adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5946 adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5947 adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5948 adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5949
5950 if (adev->asic_type == CHIP_HAWAII) {
5951 pi->thermal_temp_setting.temperature_low = 94500;
5952 pi->thermal_temp_setting.temperature_high = 95000;
5953 pi->thermal_temp_setting.temperature_shutdown = 104000;
5954 } else {
5955 pi->thermal_temp_setting.temperature_low = 99500;
5956 pi->thermal_temp_setting.temperature_high = 100000;
5957 pi->thermal_temp_setting.temperature_shutdown = 104000;
5958 }
5959
5960 pi->uvd_enabled = false;
5961
5962 dpm_table = &pi->smc_state_table;
5963
5964 gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
5965 if (gpio.valid) {
5966 dpm_table->VRHotGpio = gpio.shift;
5967 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5968 } else {
5969 dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5970 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5971 }
5972
5973 gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
5974 if (gpio.valid) {
5975 dpm_table->AcDcGpio = gpio.shift;
5976 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5977 } else {
5978 dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
5979 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5980 }
5981
5982 gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
5983 if (gpio.valid) {
5984 u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);
5985
5986 switch (gpio.shift) {
5987 case 0:
5988 tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
5989 tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
5990 break;
5991 case 1:
5992 tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
5993 tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
5994 break;
5995 case 2:
5996 tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
5997 break;
5998 case 3:
5999 tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
6000 break;
6001 case 4:
6002 tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
6003 break;
6004 default:
6005 DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
6006 break;
6007 }
6008 WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
6009 }
6010
6011 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6012 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6013 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6014 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
6015 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6016 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
6017 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6018
6019 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
6020 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
6021 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6022 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
6023 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6024 else
6025 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
6026 }
6027
6028 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
6029 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
6030 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6031 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
6032 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6033 else
6034 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
6035 }
6036
6037 pi->vddc_phase_shed_control = true;
6038
6039#if defined(CONFIG_ACPI)
6040 pi->pcie_performance_request =
6041 amdgpu_acpi_is_pcie_performance_request_supported(adev);
6042#else
6043 pi->pcie_performance_request = false;
6044#endif
6045
6046 if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
6047 &frev, &crev, &data_offset)) {
6048 pi->caps_sclk_ss_support = true;
6049 pi->caps_mclk_ss_support = true;
6050 pi->dynamic_ss = true;
6051 } else {
6052 pi->caps_sclk_ss_support = false;
6053 pi->caps_mclk_ss_support = false;
6054 pi->dynamic_ss = true;
6055 }
6056
6057 if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
6058 pi->thermal_protection = true;
6059 else
6060 pi->thermal_protection = false;
6061
6062 pi->caps_dynamic_ac_timing = true;
6063
6064 pi->uvd_power_gated = false;
6065
6066 /* make sure dc limits are valid */
6067 if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
6068 (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
6069 adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
6070 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
6071
6072 pi->fan_ctrl_is_in_default_mode = true;
6073
6074 return 0;
6075}
6076
6077static void
6078ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
6079 struct seq_file *m)
6080{
6081 struct ci_power_info *pi = ci_get_pi(adev);
6082 struct amdgpu_ps *rps = &pi->current_rps;
6083 u32 sclk = ci_get_average_sclk_freq(adev);
6084 u32 mclk = ci_get_average_mclk_freq(adev);
6085 u32 activity_percent = 50;
6086 int ret;
6087
6088 ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA),
6089 &activity_percent);
6090
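	/* AverageGraphicsA is reported scaled by 256; round it to an integer
	 * percentage and clamp to 100. */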
6091 if (ret == 0) {
6092 activity_percent += 0x80;
6093 activity_percent >>= 8;
6094 activity_percent = activity_percent > 100 ? 100 : activity_percent;
6095 }
6096
6097 seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
6098 seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
6099 seq_printf(m, "power level avg sclk: %u mclk: %u\n",
6100 sclk, mclk);
 6101	seq_printf(m, "GPU load: %u %%\n", activity_percent);
6102}
6103
6104static void ci_dpm_print_power_state(struct amdgpu_device *adev,
6105 struct amdgpu_ps *rps)
6106{
6107 struct ci_ps *ps = ci_get_ps(rps);
6108 struct ci_pl *pl;
6109 int i;
6110
6111 amdgpu_dpm_print_class_info(rps->class, rps->class2);
6112 amdgpu_dpm_print_cap_info(rps->caps);
6113 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
6114 for (i = 0; i < ps->performance_level_count; i++) {
6115 pl = &ps->performance_levels[i];
6116 printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
6117 i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
6118 }
6119 amdgpu_dpm_print_ps_status(adev, rps);
6120}
6121
6122static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
6123{
6124 struct ci_power_info *pi = ci_get_pi(adev);
6125 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6126
6127 if (low)
6128 return requested_state->performance_levels[0].sclk;
6129 else
6130 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
6131}
6132
6133static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
6134{
6135 struct ci_power_info *pi = ci_get_pi(adev);
6136 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6137
6138 if (low)
6139 return requested_state->performance_levels[0].mclk;
6140 else
6141 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
6142}
6143
6144/* get temperature in millidegrees */
6145static int ci_dpm_get_temp(struct amdgpu_device *adev)
6146{
6147 u32 temp;
6148 int actual_temp = 0;
6149
6150 temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
6151 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
6152
6153 if (temp & 0x200)
6154 actual_temp = 255;
6155 else
6156 actual_temp = temp & 0x1ff;
6157
6158 actual_temp = actual_temp * 1000;
6159
6160 return actual_temp;
6161}
6162
6163static int ci_set_temperature_range(struct amdgpu_device *adev)
6164{
6165 int ret;
6166
6167 ret = ci_thermal_enable_alert(adev, false);
6168 if (ret)
6169 return ret;
6170 ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
6171 CISLANDS_TEMP_RANGE_MAX);
6172 if (ret)
6173 return ret;
6174 ret = ci_thermal_enable_alert(adev, true);
6175 if (ret)
6176 return ret;
6177 return ret;
6178}
6179
5fc3aeeb 6180static int ci_dpm_early_init(void *handle)
a2e73f56 6181{
5fc3aeeb 6182 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6183
6184 ci_dpm_set_dpm_funcs(adev);
6185 ci_dpm_set_irq_funcs(adev);
6186
6187 return 0;
6188}
6189
5fc3aeeb 6190static int ci_dpm_late_init(void *handle)
6191{
6192 int ret;
 6193	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6194
6195 if (!amdgpu_dpm)
6196 return 0;
6197
6198 /* init the sysfs and debugfs files late */
6199 ret = amdgpu_pm_sysfs_init(adev);
6200 if (ret)
6201 return ret;
6202
6203 ret = ci_set_temperature_range(adev);
6204 if (ret)
6205 return ret;
6206
6207 ci_dpm_powergate_uvd(adev, true);
6208
6209 return 0;
6210}
6211
5fc3aeeb 6212static int ci_dpm_sw_init(void *handle)
6213{
6214 int ret;
 6215	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6216
6217 ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
6218 if (ret)
6219 return ret;
6220
6221 ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
6222 if (ret)
6223 return ret;
6224
6225 /* default to balanced state */
6226 adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
6227 adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
6228 adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
6229 adev->pm.default_sclk = adev->clock.default_sclk;
6230 adev->pm.default_mclk = adev->clock.default_mclk;
6231 adev->pm.current_sclk = adev->clock.default_sclk;
6232 adev->pm.current_mclk = adev->clock.default_mclk;
6233 adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
6234
6235 if (amdgpu_dpm == 0)
6236 return 0;
6237
6238 ret = ci_dpm_init_microcode(adev);
6239 if (ret)
6240 return ret;
6241
6242 INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
6243 mutex_lock(&adev->pm.mutex);
6244 ret = ci_dpm_init(adev);
6245 if (ret)
6246 goto dpm_failed;
6247 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
6248 if (amdgpu_dpm == 1)
6249 amdgpu_pm_print_power_states(adev);
6250 mutex_unlock(&adev->pm.mutex);
6251 DRM_INFO("amdgpu: dpm initialized\n");
6252
6253 return 0;
6254
6255dpm_failed:
6256 ci_dpm_fini(adev);
6257 mutex_unlock(&adev->pm.mutex);
6258 DRM_ERROR("amdgpu: dpm initialization failed\n");
6259 return ret;
6260}
6261
5fc3aeeb 6262static int ci_dpm_sw_fini(void *handle)
a2e73f56 6263{
5fc3aeeb 6264 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6265
6266 mutex_lock(&adev->pm.mutex);
6267 amdgpu_pm_sysfs_fini(adev);
6268 ci_dpm_fini(adev);
6269 mutex_unlock(&adev->pm.mutex);
6270
6271 return 0;
6272}
6273
5fc3aeeb 6274static int ci_dpm_hw_init(void *handle)
6275{
6276 int ret;
6277
 6278	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6279
6280 if (!amdgpu_dpm)
6281 return 0;
6282
6283 mutex_lock(&adev->pm.mutex);
6284 ci_dpm_setup_asic(adev);
6285 ret = ci_dpm_enable(adev);
6286 if (ret)
6287 adev->pm.dpm_enabled = false;
6288 else
6289 adev->pm.dpm_enabled = true;
6290 mutex_unlock(&adev->pm.mutex);
6291
6292 return ret;
6293}
6294
5fc3aeeb 6295static int ci_dpm_hw_fini(void *handle)
a2e73f56 6296{
5fc3aeeb 6297 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6298
6299 if (adev->pm.dpm_enabled) {
6300 mutex_lock(&adev->pm.mutex);
6301 ci_dpm_disable(adev);
6302 mutex_unlock(&adev->pm.mutex);
6303 }
6304
6305 return 0;
6306}
6307
5fc3aeeb 6308static int ci_dpm_suspend(void *handle)
a2e73f56 6309{
5fc3aeeb 6310 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6311
6312 if (adev->pm.dpm_enabled) {
6313 mutex_lock(&adev->pm.mutex);
6314 /* disable dpm */
6315 ci_dpm_disable(adev);
6316 /* reset the power state */
6317 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
6318 mutex_unlock(&adev->pm.mutex);
6319 }
6320 return 0;
6321}
6322
5fc3aeeb 6323static int ci_dpm_resume(void *handle)
6324{
6325 int ret;
 6326	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6327
6328 if (adev->pm.dpm_enabled) {
6329 /* asic init will reset to the boot state */
6330 mutex_lock(&adev->pm.mutex);
6331 ci_dpm_setup_asic(adev);
6332 ret = ci_dpm_enable(adev);
6333 if (ret)
6334 adev->pm.dpm_enabled = false;
6335 else
6336 adev->pm.dpm_enabled = true;
6337 mutex_unlock(&adev->pm.mutex);
6338 if (adev->pm.dpm_enabled)
6339 amdgpu_pm_compute_clocks(adev);
6340 }
6341 return 0;
6342}
6343
5fc3aeeb 6344static bool ci_dpm_is_idle(void *handle)
6345{
6346 /* XXX */
6347 return true;
6348}
6349
5fc3aeeb 6350static int ci_dpm_wait_for_idle(void *handle)
6351{
6352 /* XXX */
6353 return 0;
6354}
6355
5fc3aeeb 6356static void ci_dpm_print_status(void *handle)
a2e73f56 6357{
5fc3aeeb 6358 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6359
6360 dev_info(adev->dev, "CIK DPM registers\n");
6361 dev_info(adev->dev, " BIOS_SCRATCH_4=0x%08X\n",
6362 RREG32(mmBIOS_SCRATCH_4));
6363 dev_info(adev->dev, " MC_ARB_DRAM_TIMING=0x%08X\n",
6364 RREG32(mmMC_ARB_DRAM_TIMING));
6365 dev_info(adev->dev, " MC_ARB_DRAM_TIMING2=0x%08X\n",
6366 RREG32(mmMC_ARB_DRAM_TIMING2));
6367 dev_info(adev->dev, " MC_ARB_BURST_TIME=0x%08X\n",
6368 RREG32(mmMC_ARB_BURST_TIME));
6369 dev_info(adev->dev, " MC_ARB_DRAM_TIMING_1=0x%08X\n",
6370 RREG32(mmMC_ARB_DRAM_TIMING_1));
6371 dev_info(adev->dev, " MC_ARB_DRAM_TIMING2_1=0x%08X\n",
6372 RREG32(mmMC_ARB_DRAM_TIMING2_1));
6373 dev_info(adev->dev, " MC_CG_CONFIG=0x%08X\n",
6374 RREG32(mmMC_CG_CONFIG));
6375 dev_info(adev->dev, " MC_ARB_CG=0x%08X\n",
6376 RREG32(mmMC_ARB_CG));
6377 dev_info(adev->dev, " DIDT_SQ_CTRL0=0x%08X\n",
6378 RREG32_DIDT(ixDIDT_SQ_CTRL0));
6379 dev_info(adev->dev, " DIDT_DB_CTRL0=0x%08X\n",
6380 RREG32_DIDT(ixDIDT_DB_CTRL0));
6381 dev_info(adev->dev, " DIDT_TD_CTRL0=0x%08X\n",
6382 RREG32_DIDT(ixDIDT_TD_CTRL0));
6383 dev_info(adev->dev, " DIDT_TCP_CTRL0=0x%08X\n",
6384 RREG32_DIDT(ixDIDT_TCP_CTRL0));
6385 dev_info(adev->dev, " CG_THERMAL_INT=0x%08X\n",
6386 RREG32_SMC(ixCG_THERMAL_INT));
6387 dev_info(adev->dev, " CG_THERMAL_CTRL=0x%08X\n",
6388 RREG32_SMC(ixCG_THERMAL_CTRL));
6389 dev_info(adev->dev, " GENERAL_PWRMGT=0x%08X\n",
6390 RREG32_SMC(ixGENERAL_PWRMGT));
6391 dev_info(adev->dev, " MC_SEQ_CNTL_3=0x%08X\n",
6392 RREG32(mmMC_SEQ_CNTL_3));
6393 dev_info(adev->dev, " LCAC_MC0_CNTL=0x%08X\n",
6394 RREG32_SMC(ixLCAC_MC0_CNTL));
6395 dev_info(adev->dev, " LCAC_MC1_CNTL=0x%08X\n",
6396 RREG32_SMC(ixLCAC_MC1_CNTL));
6397 dev_info(adev->dev, " LCAC_CPL_CNTL=0x%08X\n",
6398 RREG32_SMC(ixLCAC_CPL_CNTL));
6399 dev_info(adev->dev, " SCLK_PWRMGT_CNTL=0x%08X\n",
6400 RREG32_SMC(ixSCLK_PWRMGT_CNTL));
6401 dev_info(adev->dev, " BIF_LNCNT_RESET=0x%08X\n",
6402 RREG32(mmBIF_LNCNT_RESET));
6403 dev_info(adev->dev, " FIRMWARE_FLAGS=0x%08X\n",
6404 RREG32_SMC(ixFIRMWARE_FLAGS));
6405 dev_info(adev->dev, " CG_SPLL_FUNC_CNTL=0x%08X\n",
6406 RREG32_SMC(ixCG_SPLL_FUNC_CNTL));
6407 dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_2=0x%08X\n",
6408 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2));
6409 dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_3=0x%08X\n",
6410 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3));
6411 dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_4=0x%08X\n",
6412 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4));
6413 dev_info(adev->dev, " CG_SPLL_SPREAD_SPECTRUM=0x%08X\n",
6414 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM));
6415 dev_info(adev->dev, " CG_SPLL_SPREAD_SPECTRUM_2=0x%08X\n",
6416 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2));
6417 dev_info(adev->dev, " DLL_CNTL=0x%08X\n",
6418 RREG32(mmDLL_CNTL));
6419 dev_info(adev->dev, " MCLK_PWRMGT_CNTL=0x%08X\n",
6420 RREG32(mmMCLK_PWRMGT_CNTL));
6421 dev_info(adev->dev, " MPLL_AD_FUNC_CNTL=0x%08X\n",
6422 RREG32(mmMPLL_AD_FUNC_CNTL));
6423 dev_info(adev->dev, " MPLL_DQ_FUNC_CNTL=0x%08X\n",
6424 RREG32(mmMPLL_DQ_FUNC_CNTL));
6425 dev_info(adev->dev, " MPLL_FUNC_CNTL=0x%08X\n",
6426 RREG32(mmMPLL_FUNC_CNTL));
6427 dev_info(adev->dev, " MPLL_FUNC_CNTL_1=0x%08X\n",
6428 RREG32(mmMPLL_FUNC_CNTL_1));
6429 dev_info(adev->dev, " MPLL_FUNC_CNTL_2=0x%08X\n",
6430 RREG32(mmMPLL_FUNC_CNTL_2));
6431 dev_info(adev->dev, " MPLL_SS1=0x%08X\n",
6432 RREG32(mmMPLL_SS1));
6433 dev_info(adev->dev, " MPLL_SS2=0x%08X\n",
6434 RREG32(mmMPLL_SS2));
6435 dev_info(adev->dev, " CG_DISPLAY_GAP_CNTL=0x%08X\n",
6436 RREG32_SMC(ixCG_DISPLAY_GAP_CNTL));
6437 dev_info(adev->dev, " CG_DISPLAY_GAP_CNTL2=0x%08X\n",
6438 RREG32_SMC(ixCG_DISPLAY_GAP_CNTL2));
6439 dev_info(adev->dev, " CG_STATIC_SCREEN_PARAMETER=0x%08X\n",
6440 RREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER));
6441 dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_0=0x%08X\n",
6442 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_0));
6443 dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_1=0x%08X\n",
6444 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_1));
6445 dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_2=0x%08X\n",
6446 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_2));
6447 dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_3=0x%08X\n",
6448 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_3));
6449 dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_4=0x%08X\n",
6450 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_4));
6451 dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_5=0x%08X\n",
6452 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_5));
6453 dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_6=0x%08X\n",
6454 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_6));
6455 dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_7=0x%08X\n",
6456 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_7));
6457 dev_info(adev->dev, " RCU_UC_EVENTS=0x%08X\n",
6458 RREG32_SMC(ixRCU_UC_EVENTS));
6459 dev_info(adev->dev, " DPM_TABLE_475=0x%08X\n",
6460 RREG32_SMC(ixDPM_TABLE_475));
6461 dev_info(adev->dev, " MC_SEQ_RAS_TIMING_LP=0x%08X\n",
6462 RREG32(mmMC_SEQ_RAS_TIMING_LP));
6463 dev_info(adev->dev, " MC_SEQ_RAS_TIMING=0x%08X\n",
6464 RREG32(mmMC_SEQ_RAS_TIMING));
6465 dev_info(adev->dev, " MC_SEQ_CAS_TIMING_LP=0x%08X\n",
6466 RREG32(mmMC_SEQ_CAS_TIMING_LP));
6467 dev_info(adev->dev, " MC_SEQ_CAS_TIMING=0x%08X\n",
6468 RREG32(mmMC_SEQ_CAS_TIMING));
6469 dev_info(adev->dev, " MC_SEQ_DLL_STBY_LP=0x%08X\n",
6470 RREG32(mmMC_SEQ_DLL_STBY_LP));
6471 dev_info(adev->dev, " MC_SEQ_DLL_STBY=0x%08X\n",
6472 RREG32(mmMC_SEQ_DLL_STBY));
6473 dev_info(adev->dev, " MC_SEQ_G5PDX_CMD0_LP=0x%08X\n",
6474 RREG32(mmMC_SEQ_G5PDX_CMD0_LP));
6475 dev_info(adev->dev, " MC_SEQ_G5PDX_CMD0=0x%08X\n",
6476 RREG32(mmMC_SEQ_G5PDX_CMD0));
6477 dev_info(adev->dev, " MC_SEQ_G5PDX_CMD1_LP=0x%08X\n",
6478 RREG32(mmMC_SEQ_G5PDX_CMD1_LP));
6479 dev_info(adev->dev, " MC_SEQ_G5PDX_CMD1=0x%08X\n",
6480 RREG32(mmMC_SEQ_G5PDX_CMD1));
6481 dev_info(adev->dev, " MC_SEQ_G5PDX_CTRL_LP=0x%08X\n",
6482 RREG32(mmMC_SEQ_G5PDX_CTRL_LP));
6483 dev_info(adev->dev, " MC_SEQ_G5PDX_CTRL=0x%08X\n",
6484 RREG32(mmMC_SEQ_G5PDX_CTRL));
6485 dev_info(adev->dev, " MC_SEQ_PMG_DVS_CMD_LP=0x%08X\n",
6486 RREG32(mmMC_SEQ_PMG_DVS_CMD_LP));
6487 dev_info(adev->dev, " MC_SEQ_PMG_DVS_CMD=0x%08X\n",
6488 RREG32(mmMC_SEQ_PMG_DVS_CMD));
6489 dev_info(adev->dev, " MC_SEQ_PMG_DVS_CTL_LP=0x%08X\n",
6490 RREG32(mmMC_SEQ_PMG_DVS_CTL_LP));
6491 dev_info(adev->dev, " MC_SEQ_PMG_DVS_CTL=0x%08X\n",
6492 RREG32(mmMC_SEQ_PMG_DVS_CTL));
6493 dev_info(adev->dev, " MC_SEQ_MISC_TIMING_LP=0x%08X\n",
6494 RREG32(mmMC_SEQ_MISC_TIMING_LP));
6495 dev_info(adev->dev, " MC_SEQ_MISC_TIMING=0x%08X\n",
6496 RREG32(mmMC_SEQ_MISC_TIMING));
6497 dev_info(adev->dev, " MC_SEQ_MISC_TIMING2_LP=0x%08X\n",
6498 RREG32(mmMC_SEQ_MISC_TIMING2_LP));
6499 dev_info(adev->dev, " MC_SEQ_MISC_TIMING2=0x%08X\n",
6500 RREG32(mmMC_SEQ_MISC_TIMING2));
6501 dev_info(adev->dev, " MC_SEQ_PMG_CMD_EMRS_LP=0x%08X\n",
6502 RREG32(mmMC_SEQ_PMG_CMD_EMRS_LP));
6503 dev_info(adev->dev, " MC_PMG_CMD_EMRS=0x%08X\n",
6504 RREG32(mmMC_PMG_CMD_EMRS));
6505 dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS_LP=0x%08X\n",
6506 RREG32(mmMC_SEQ_PMG_CMD_MRS_LP));
6507 dev_info(adev->dev, " MC_PMG_CMD_MRS=0x%08X\n",
6508 RREG32(mmMC_PMG_CMD_MRS));
6509 dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS1_LP=0x%08X\n",
6510 RREG32(mmMC_SEQ_PMG_CMD_MRS1_LP));
6511 dev_info(adev->dev, " MC_PMG_CMD_MRS1=0x%08X\n",
6512 RREG32(mmMC_PMG_CMD_MRS1));
6513 dev_info(adev->dev, " MC_SEQ_WR_CTL_D0_LP=0x%08X\n",
6514 RREG32(mmMC_SEQ_WR_CTL_D0_LP));
6515 dev_info(adev->dev, " MC_SEQ_WR_CTL_D0=0x%08X\n",
6516 RREG32(mmMC_SEQ_WR_CTL_D0));
6517 dev_info(adev->dev, " MC_SEQ_WR_CTL_D1_LP=0x%08X\n",
6518 RREG32(mmMC_SEQ_WR_CTL_D1_LP));
6519 dev_info(adev->dev, " MC_SEQ_WR_CTL_D1=0x%08X\n",
6520 RREG32(mmMC_SEQ_WR_CTL_D1));
6521 dev_info(adev->dev, " MC_SEQ_RD_CTL_D0_LP=0x%08X\n",
6522 RREG32(mmMC_SEQ_RD_CTL_D0_LP));
6523 dev_info(adev->dev, " MC_SEQ_RD_CTL_D0=0x%08X\n",
6524 RREG32(mmMC_SEQ_RD_CTL_D0));
6525 dev_info(adev->dev, " MC_SEQ_RD_CTL_D1_LP=0x%08X\n",
6526 RREG32(mmMC_SEQ_RD_CTL_D1_LP));
6527 dev_info(adev->dev, " MC_SEQ_RD_CTL_D1=0x%08X\n",
6528 RREG32(mmMC_SEQ_RD_CTL_D1));
6529 dev_info(adev->dev, " MC_SEQ_PMG_TIMING_LP=0x%08X\n",
6530 RREG32(mmMC_SEQ_PMG_TIMING_LP));
6531 dev_info(adev->dev, " MC_SEQ_PMG_TIMING=0x%08X\n",
6532 RREG32(mmMC_SEQ_PMG_TIMING));
6533 dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS2_LP=0x%08X\n",
6534 RREG32(mmMC_SEQ_PMG_CMD_MRS2_LP));
6535 dev_info(adev->dev, " MC_PMG_CMD_MRS2=0x%08X\n",
6536 RREG32(mmMC_PMG_CMD_MRS2));
6537 dev_info(adev->dev, " MC_SEQ_WR_CTL_2_LP=0x%08X\n",
6538 RREG32(mmMC_SEQ_WR_CTL_2_LP));
6539 dev_info(adev->dev, " MC_SEQ_WR_CTL_2=0x%08X\n",
6540 RREG32(mmMC_SEQ_WR_CTL_2));
6541 dev_info(adev->dev, " PCIE_LC_SPEED_CNTL=0x%08X\n",
6542 RREG32_PCIE(ixPCIE_LC_SPEED_CNTL));
6543 dev_info(adev->dev, " PCIE_LC_LINK_WIDTH_CNTL=0x%08X\n",
6544 RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL));
6545 dev_info(adev->dev, " SMC_IND_INDEX_0=0x%08X\n",
6546 RREG32(mmSMC_IND_INDEX_0));
6547 dev_info(adev->dev, " SMC_IND_DATA_0=0x%08X\n",
6548 RREG32(mmSMC_IND_DATA_0));
6549 dev_info(adev->dev, " SMC_IND_ACCESS_CNTL=0x%08X\n",
6550 RREG32(mmSMC_IND_ACCESS_CNTL));
6551 dev_info(adev->dev, " SMC_RESP_0=0x%08X\n",
6552 RREG32(mmSMC_RESP_0));
6553 dev_info(adev->dev, " SMC_MESSAGE_0=0x%08X\n",
6554 RREG32(mmSMC_MESSAGE_0));
6555 dev_info(adev->dev, " SMC_SYSCON_RESET_CNTL=0x%08X\n",
6556 RREG32_SMC(ixSMC_SYSCON_RESET_CNTL));
6557 dev_info(adev->dev, " SMC_SYSCON_CLOCK_CNTL_0=0x%08X\n",
6558 RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0));
6559 dev_info(adev->dev, " SMC_SYSCON_MISC_CNTL=0x%08X\n",
6560 RREG32_SMC(ixSMC_SYSCON_MISC_CNTL));
6561 dev_info(adev->dev, " SMC_PC_C=0x%08X\n",
6562 RREG32_SMC(ixSMC_PC_C));
6563}
6564
5fc3aeeb 6565static int ci_dpm_soft_reset(void *handle)
6566{
6567 return 0;
6568}
6569
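/* Mask or unmask the low-to-high / high-to-low thermal interrupt sources
 * in CG_THERMAL_INT according to the requested interrupt state. */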
6570static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
6571 struct amdgpu_irq_src *source,
6572 unsigned type,
6573 enum amdgpu_interrupt_state state)
6574{
6575 u32 cg_thermal_int;
6576
6577 switch (type) {
6578 case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
6579 switch (state) {
6580 case AMDGPU_IRQ_STATE_DISABLE:
6581 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
 6582			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6583 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6584 break;
6585 case AMDGPU_IRQ_STATE_ENABLE:
6586 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
 6587			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6588 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6589 break;
6590 default:
6591 break;
6592 }
6593 break;
6594
6595 case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
6596 switch (state) {
6597 case AMDGPU_IRQ_STATE_DISABLE:
6598 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
 6599			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6600 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6601 break;
6602 case AMDGPU_IRQ_STATE_ENABLE:
6603 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
 6604			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6605 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6606 break;
6607 default:
6608 break;
6609 }
6610 break;
6611
6612 default:
6613 break;
6614 }
6615 return 0;
6616}
6617
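/* Thermal interrupt handler: note the trip direction (src_id 230 = low to
 * high, 231 = high to low) and schedule the thermal work handler. */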
6618static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
6619 struct amdgpu_irq_src *source,
6620 struct amdgpu_iv_entry *entry)
6621{
6622 bool queue_thermal = false;
6623
6624 if (entry == NULL)
6625 return -EINVAL;
6626
6627 switch (entry->src_id) {
6628 case 230: /* thermal low to high */
6629 DRM_DEBUG("IH: thermal low to high\n");
6630 adev->pm.dpm.thermal.high_to_low = false;
6631 queue_thermal = true;
6632 break;
6633 case 231: /* thermal high to low */
6634 DRM_DEBUG("IH: thermal high to low\n");
6635 adev->pm.dpm.thermal.high_to_low = true;
6636 queue_thermal = true;
6637 break;
6638 default:
6639 break;
6640 }
6641
6642 if (queue_thermal)
6643 schedule_work(&adev->pm.dpm.thermal.work);
6644
6645 return 0;
6646}
6647
5fc3aeeb 6648static int ci_dpm_set_clockgating_state(void *handle,
6649 enum amd_clockgating_state state)
6650{
6651 return 0;
6652}
6653
5fc3aeeb 6654static int ci_dpm_set_powergating_state(void *handle,
6655 enum amd_powergating_state state)
6656{
6657 return 0;
6658}
6659
5fc3aeeb 6660const struct amd_ip_funcs ci_dpm_ip_funcs = {
6661 .early_init = ci_dpm_early_init,
6662 .late_init = ci_dpm_late_init,
6663 .sw_init = ci_dpm_sw_init,
6664 .sw_fini = ci_dpm_sw_fini,
6665 .hw_init = ci_dpm_hw_init,
6666 .hw_fini = ci_dpm_hw_fini,
6667 .suspend = ci_dpm_suspend,
6668 .resume = ci_dpm_resume,
6669 .is_idle = ci_dpm_is_idle,
6670 .wait_for_idle = ci_dpm_wait_for_idle,
6671 .soft_reset = ci_dpm_soft_reset,
6672 .print_status = ci_dpm_print_status,
6673 .set_clockgating_state = ci_dpm_set_clockgating_state,
6674 .set_powergating_state = ci_dpm_set_powergating_state,
6675};
6676
6677static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
6678 .get_temperature = &ci_dpm_get_temp,
6679 .pre_set_power_state = &ci_dpm_pre_set_power_state,
6680 .set_power_state = &ci_dpm_set_power_state,
6681 .post_set_power_state = &ci_dpm_post_set_power_state,
6682 .display_configuration_changed = &ci_dpm_display_configuration_changed,
6683 .get_sclk = &ci_dpm_get_sclk,
6684 .get_mclk = &ci_dpm_get_mclk,
6685 .print_power_state = &ci_dpm_print_power_state,
6686 .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
6687 .force_performance_level = &ci_dpm_force_performance_level,
6688 .vblank_too_short = &ci_dpm_vblank_too_short,
6689 .powergate_uvd = &ci_dpm_powergate_uvd,
6690 .set_fan_control_mode = &ci_dpm_set_fan_control_mode,
6691 .get_fan_control_mode = &ci_dpm_get_fan_control_mode,
6692 .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
6693 .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
6694};
6695
6696static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
6697{
6698 if (adev->pm.funcs == NULL)
6699 adev->pm.funcs = &ci_dpm_funcs;
6700}
6701
6702static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
6703 .set = ci_dpm_set_interrupt_state,
6704 .process = ci_dpm_process_interrupt,
6705};
6706
6707static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
6708{
6709 adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
6710 adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
6711}