/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "drmP.h"
#include "radeon.h"
#include "r600d.h"
#include "r600_dpm.h"
#include "atom.h"

const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
{
        R600_UTC_DFLT_00,
        R600_UTC_DFLT_01,
        R600_UTC_DFLT_02,
        R600_UTC_DFLT_03,
        R600_UTC_DFLT_04,
        R600_UTC_DFLT_05,
        R600_UTC_DFLT_06,
        R600_UTC_DFLT_07,
        R600_UTC_DFLT_08,
        R600_UTC_DFLT_09,
        R600_UTC_DFLT_10,
        R600_UTC_DFLT_11,
        R600_UTC_DFLT_12,
        R600_UTC_DFLT_13,
        R600_UTC_DFLT_14,
};

const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
{
        R600_DTC_DFLT_00,
        R600_DTC_DFLT_01,
        R600_DTC_DFLT_02,
        R600_DTC_DFLT_03,
        R600_DTC_DFLT_04,
        R600_DTC_DFLT_05,
        R600_DTC_DFLT_06,
        R600_DTC_DFLT_07,
        R600_DTC_DFLT_08,
        R600_DTC_DFLT_09,
        R600_DTC_DFLT_10,
        R600_DTC_DFLT_11,
        R600_DTC_DFLT_12,
        R600_DTC_DFLT_13,
        R600_DTC_DFLT_14,
};

void r600_dpm_print_class_info(u32 class, u32 class2)
{
        printk("\tui class: ");
        switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
        case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
        default:
                printk("none\n");
                break;
        case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
                printk("battery\n");
                break;
        case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
                printk("balanced\n");
                break;
        case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
                printk("performance\n");
                break;
        }
        printk("\tinternal class: ");
        if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
            (class2 == 0))
                printk("none");
        else {
                if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
                        printk("boot ");
                if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
                        printk("thermal ");
                if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
                        printk("limited_pwr ");
                if (class & ATOM_PPLIB_CLASSIFICATION_REST)
                        printk("rest ");
                if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
                        printk("forced ");
                if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
                        printk("3d_perf ");
                if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
                        printk("ovrdrv ");
                if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
                        printk("uvd ");
                if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
                        printk("3d_low ");
                if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
                        printk("acpi ");
                if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
                        printk("uvd_hd2 ");
                if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
                        printk("uvd_hd ");
                if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
                        printk("uvd_sd ");
                if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
                        printk("limited_pwr2 ");
                if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
                        printk("ulv ");
                if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
                        printk("uvd_mvc ");
        }
        printk("\n");
}

void r600_dpm_print_cap_info(u32 caps)
{
        printk("\tcaps: ");
        if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
                printk("single_disp ");
        if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
                printk("video ");
        if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
                printk("no_dc ");
        printk("\n");
}

void r600_dpm_print_ps_status(struct radeon_device *rdev,
                              struct radeon_ps *rps)
{
        printk("\tstatus: ");
        if (rps == rdev->pm.dpm.current_ps)
                printk("c ");
        if (rps == rdev->pm.dpm.requested_ps)
                printk("r ");
        if (rps == rdev->pm.dpm.boot_ps)
                printk("b ");
        printk("\n");
}

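/* Split (i * r_c) / 100 into a mantissa p and an exponent u such that
 * p << (2 * u) approximates the scaled interval while p still fits in a
 * field of roughly p_b bits; callers feed the resulting (p, u) pair into
 * register helpers such as r600_set_bsp().
 */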
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
                            u32 *p, u32 *u)
{
        u32 b_c = 0;
        u32 i_c;
        u32 tmp;

        i_c = (i * r_c) / 100;
        tmp = i_c >> p_b;

        while (tmp) {
                b_c++;
                tmp >>= 1;
        }

        *u = (b_c + 1) / 2;
        *p = i_c / (1 << (2 * (*u)));
}

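/* Compute a (tl, th) threshold pair around the target t from the hysteresis
 * h and the high/low clock pair fh/fl; callers program the results as the
 * up/down activity thresholds (see r600_set_at()).  Returns -EINVAL for a
 * zero or inverted clock pair.
 */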
int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
        u32 k, a, ah, al;
        u32 t1;

        if ((fl == 0) || (fh == 0) || (fl > fh))
                return -EINVAL;

        k = (100 * fh) / fl;
        t1 = (t * (k - 100));
        a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
        a = (a + 5) / 10;
        ah = ((a * t) + 5000) / 10000;
        al = a - ah;

        *th = t - ah;
        *tl = t + al;

        return 0;
}

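/* Toggle dynamic gfx clock gating.  Disabling it additionally performs a
 * request/response handshake with the RLC (CG_RLC_REQ_AND_RSP) and pokes
 * GRBM_PWR_CNTL before returning.
 */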
void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
{
        int i;

        if (enable) {
                WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
        } else {
                WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);

                WREG32(CG_RLC_REQ_AND_RSP, 0x2);

                for (i = 0; i < rdev->usec_timeout; i++) {
                        if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1)
                                break;
                        udelay(1);
                }

                WREG32(CG_RLC_REQ_AND_RSP, 0x0);

                WREG32(GRBM_PWR_CNTL, 0x1);
                RREG32(GRBM_PWR_CNTL);
        }
}

void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable)
{
        if (enable)
                WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
        else
                WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}

void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable)
{
        if (enable)
                WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
        else
                WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
}

void r600_enable_acpi_pm(struct radeon_device *rdev)
{
        WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}

void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable)
{
        if (enable)
                WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
        else
                WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
}

bool r600_dynamicpm_enabled(struct radeon_device *rdev)
{
        if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
                return true;
        else
                return false;
}

void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
        if (enable)
                WREG32_P(GENERAL_PWRMGT, 0, ~SCLK_PWRMGT_OFF);
        else
                WREG32_P(GENERAL_PWRMGT, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
}

void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
{
        if (enable)
                WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
        else
                WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
}

void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable)
{
        if (enable)
                WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN);
        else
                WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN);
}

void r600_wait_for_spll_change(struct radeon_device *rdev)
{
        int i;

        for (i = 0; i < rdev->usec_timeout; i++) {
                if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS)
                        break;
                udelay(1);
        }
}

void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
{
        WREG32(CG_BSP, BSP(p) | BSU(u));
}

void r600_set_at(struct radeon_device *rdev,
                 u32 l_to_m, u32 m_to_h,
                 u32 h_to_m, u32 m_to_l)
{
        WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
        WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));
}

void r600_set_tc(struct radeon_device *rdev,
                 u32 index, u32 u_t, u32 d_t)
{
        WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));
}

void r600_select_td(struct radeon_device *rdev,
                    enum r600_td td)
{
        if (td == R600_TD_AUTO)
                WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
        else
                WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
        if (td == R600_TD_UP)
                WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
        if (td == R600_TD_DOWN)
                WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}

void r600_set_vrc(struct radeon_device *rdev, u32 vrv)
{
        WREG32(CG_FTV, vrv);
}

void r600_set_tpu(struct radeon_device *rdev, u32 u)
{
        WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);
}

void r600_set_tpc(struct radeon_device *rdev, u32 c)
{
        WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);
}

void r600_set_sstu(struct radeon_device *rdev, u32 u)
{
        WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);
}

void r600_set_sst(struct radeon_device *rdev, u32 t)
{
        WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);
}

void r600_set_git(struct radeon_device *rdev, u32 t)
{
        WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);
}

void r600_set_fctu(struct radeon_device *rdev, u32 u)
{
        WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);
}

void r600_set_fct(struct radeon_device *rdev, u32 t)
{
        WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);
}

void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
{
        WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);
}

void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
{
        WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);
}

void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
{
        WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);
}

void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
{
        WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);
}

void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
{
        WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);
}

void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
{
        WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);
}

void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
{
        WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);
}

void r600_engine_clock_entry_enable(struct radeon_device *rdev,
                                    u32 index, bool enable)
{
        if (enable)
                WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
                         STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID);
        else
                WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
                         0, ~STEP_0_SPLL_ENTRY_VALID);
}

void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
                                                   u32 index, bool enable)
{
        if (enable)
                WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
                         STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE);
        else
                WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
                         0, ~STEP_0_SPLL_STEP_ENABLE);
}

void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
                                                 u32 index, bool enable)
{
        if (enable)
                WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
                         STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN);
        else
                WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
                         0, ~STEP_0_POST_DIV_EN);
}

void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
                                              u32 index, u32 divider)
{
        WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
                 STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);
}

void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
                                                   u32 index, u32 divider)
{
        WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
                 STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);
}

void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
                                                  u32 index, u32 divider)
{
        WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
                 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);
}

void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
                                           u32 index, u32 step_time)
{
        WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
                 STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);
}

void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
{
        WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);
}

void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
{
        WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);
}

void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
{
        WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
}

void r600_voltage_control_enable_pins(struct radeon_device *rdev,
                                      u64 mask)
{
        WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
        WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));
}

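/* The four CTXSW profile slots are addressed in reverse order relative to
 * enum r600_power_level, hence the "ix = 3 - (3 & index)" remapping used by
 * this function and the r600_power_level_*() helpers below.
 */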
void r600_voltage_control_program_voltages(struct radeon_device *rdev,
                                           enum r600_power_level index, u64 pins)
{
        u32 tmp, mask;
        u32 ix = 3 - (3 & index);

        WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);

        mask = 7 << (3 * ix);
        tmp = RREG32(VID_UPPER_GPIO_CNTL);
        tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
        WREG32(VID_UPPER_GPIO_CNTL, tmp);
}

void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
                                                    u64 mask)
{
        u32 gpio;

        gpio = RREG32(GPIOPAD_MASK);
        gpio &= ~mask;
        WREG32(GPIOPAD_MASK, gpio);

        gpio = RREG32(GPIOPAD_EN);
        gpio &= ~mask;
        WREG32(GPIOPAD_EN, gpio);

        gpio = RREG32(GPIOPAD_A);
        gpio &= ~mask;
        WREG32(GPIOPAD_A, gpio);
}

void r600_power_level_enable(struct radeon_device *rdev,
                             enum r600_power_level index, bool enable)
{
        u32 ix = 3 - (3 & index);

        if (enable)
                WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE,
                         ~CTXSW_FREQ_STATE_ENABLE);
        else
                WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0,
                         ~CTXSW_FREQ_STATE_ENABLE);
}

void r600_power_level_set_voltage_index(struct radeon_device *rdev,
                                        enum r600_power_level index, u32 voltage_index)
{
        u32 ix = 3 - (3 & index);

        WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
                 CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);
}

void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
                                          enum r600_power_level index, u32 mem_clock_index)
{
        u32 ix = 3 - (3 & index);

        WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
                 CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
                                          enum r600_power_level index, u32 eng_clock_index)
{
        u32 ix = 3 - (3 & index);

        WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
                 CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_watermark_id(struct radeon_device *rdev,
                                       enum r600_power_level index,
                                       enum r600_display_watermark watermark_id)
{
        u32 ix = 3 - (3 & index);
        u32 tmp = 0;

        if (watermark_id == R600_DISPLAY_WATERMARK_HIGH)
                tmp = CTXSW_FREQ_DISPLAY_WATERMARK;
        WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK);
}

void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
                                    enum r600_power_level index, bool compatible)
{
        u32 ix = 3 - (3 & index);
        u32 tmp = 0;

        if (compatible)
                tmp = CTXSW_FREQ_GEN2PCIE_VOLT;
        WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT);
}

enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev)
{
        u32 tmp;

        tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK;
        tmp >>= CURRENT_PROFILE_INDEX_SHIFT;
        return tmp;
}

enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev)
{
        u32 tmp;

        tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK;
        tmp >>= TARGET_PROFILE_INDEX_SHIFT;
        return tmp;
}

void r600_power_level_set_enter_index(struct radeon_device *rdev,
                                      enum r600_power_level index)
{
        WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
                 ~DYN_PWR_ENTER_INDEX_MASK);
}

void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
                                       enum r600_power_level index)
{
        int i;

        for (i = 0; i < rdev->usec_timeout; i++) {
                if (r600_power_level_get_target_index(rdev) != index)
                        break;
                udelay(1);
        }

        for (i = 0; i < rdev->usec_timeout; i++) {
                if (r600_power_level_get_current_index(rdev) != index)
                        break;
                udelay(1);
        }
}

void r600_wait_for_power_level(struct radeon_device *rdev,
                               enum r600_power_level index)
{
        int i;

        for (i = 0; i < rdev->usec_timeout; i++) {
                if (r600_power_level_get_target_index(rdev) == index)
                        break;
                udelay(1);
        }

        for (i = 0; i < rdev->usec_timeout; i++) {
                if (r600_power_level_get_current_index(rdev) == index)
                        break;
                udelay(1);
        }
}

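/* Start dynamic power management: sclk/mclk control is temporarily turned
 * off, global PM is enabled, the code waits for a vblank on both CRTCs and
 * toggles the SPLL in and out of bypass twice before sclk/mclk control is
 * turned back on.
 */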
void r600_start_dpm(struct radeon_device *rdev)
{
        r600_enable_sclk_control(rdev, false);
        r600_enable_mclk_control(rdev, false);

        r600_dynamicpm_enable(rdev, true);

        radeon_wait_for_vblank(rdev, 0);
        radeon_wait_for_vblank(rdev, 1);

        r600_enable_spll_bypass(rdev, true);
        r600_wait_for_spll_change(rdev);
        r600_enable_spll_bypass(rdev, false);
        r600_wait_for_spll_change(rdev);

        r600_enable_spll_bypass(rdev, true);
        r600_wait_for_spll_change(rdev);
        r600_enable_spll_bypass(rdev, false);
        r600_wait_for_spll_change(rdev);

        r600_enable_sclk_control(rdev, true);
        r600_enable_mclk_control(rdev, true);
}

void r600_stop_dpm(struct radeon_device *rdev)
{
        r600_dynamicpm_enable(rdev, false);
}

int r600_dpm_pre_set_power_state(struct radeon_device *rdev)
{
        return 0;
}

void r600_dpm_post_set_power_state(struct radeon_device *rdev)
{

}

bool r600_is_uvd_state(u32 class, u32 class2)
{
        if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
                return true;
        if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
                return true;
        if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
                return true;
        if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
                return true;
        if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
                return true;
        return false;
}

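/* Clamp the requested thermal trip points to the 0..255 C range supported by
 * the hardware, program the thermal interrupt and DPM thresholds (the
 * registers take whole degrees C), and cache the range in millidegrees.
 */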
int r600_set_thermal_temperature_range(struct radeon_device *rdev,
                                       int min_temp, int max_temp)
{
        int low_temp = 0 * 1000;
        int high_temp = 255 * 1000;

        if (low_temp < min_temp)
                low_temp = min_temp;
        if (high_temp > max_temp)
                high_temp = max_temp;
        if (high_temp < low_temp) {
                DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
                return -EINVAL;
        }

        WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
        WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
        WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);

        rdev->pm.dpm.thermal.min_temp = low_temp;
        rdev->pm.dpm.thermal.max_temp = high_temp;

        return 0;
}

bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
{
        switch (sensor) {
        case THERMAL_TYPE_RV6XX:
        case THERMAL_TYPE_RV770:
        case THERMAL_TYPE_EVERGREEN:
        case THERMAL_TYPE_SUMO:
        case THERMAL_TYPE_NI:
        case THERMAL_TYPE_SI:
                return true;
        case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
        case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
                return false; /* need special handling */
        case THERMAL_TYPE_NONE:
        case THERMAL_TYPE_EXTERNAL:
        case THERMAL_TYPE_EXTERNAL_GPIO:
        default:
                return false;
        }
}

union power_info {
        struct _ATOM_POWERPLAY_INFO info;
        struct _ATOM_POWERPLAY_INFO_V2 info_2;
        struct _ATOM_POWERPLAY_INFO_V3 info_3;
        struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
        struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
        struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
        struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
        struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
        struct _ATOM_PPLIB_FANTABLE fan;
        struct _ATOM_PPLIB_FANTABLE2 fan2;
};

static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
                                            ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
        u32 size = atom_table->ucNumEntries *
                sizeof(struct radeon_clock_voltage_dependency_entry);
        int i;

        radeon_table->entries = kzalloc(size, GFP_KERNEL);
        if (!radeon_table->entries)
                return -ENOMEM;

        for (i = 0; i < atom_table->ucNumEntries; i++) {
                radeon_table->entries[i].clk = le16_to_cpu(atom_table->entries[i].usClockLow) |
                        (atom_table->entries[i].ucClockHigh << 16);
                radeon_table->entries[i].v = le16_to_cpu(atom_table->entries[i].usVoltage);
        }
        radeon_table->count = atom_table->ucNumEntries;

        return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22

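/* Walk the ATOM PowerPlay table and copy the extended data (fan table,
 * clock/voltage dependency and phase-shedding tables, CAC leakage and PPM
 * info) into rdev->pm.dpm; each optional table is only parsed when the
 * table size and offsets indicate it is present.
 */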
int r600_parse_extended_power_table(struct radeon_device *rdev)
{
        struct radeon_mode_info *mode_info = &rdev->mode_info;
        union power_info *power_info;
        union fan_info *fan_info;
        ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
        u16 data_offset;
        u8 frev, crev;
        int ret, i;

        if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
                                    &frev, &crev, &data_offset))
                return -EINVAL;
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

        /* fan table */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
                if (power_info->pplib3.usFanTableOffset) {
                        fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
                                                      le16_to_cpu(power_info->pplib3.usFanTableOffset));
                        rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
                        rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
                        rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
                        rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
                        rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
                        rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
                        rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
                        if (fan_info->fan.ucFanTableFormat >= 2)
                                rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
                        else
                                rdev->pm.dpm.fan.t_max = 10900;
                        rdev->pm.dpm.fan.cycle_delay = 100000;
                        rdev->pm.dpm.fan.ucode_fan_control = true;
                }
        }

        /* clock dependency tables, shedding tables */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
                if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
                        ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
                                                               dep_table);
                        if (ret)
                                return ret;
                }
                if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
                        ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
                                                               dep_table);
                        if (ret) {
                                kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
                                return ret;
                        }
                }
                if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
                        ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
                                                               dep_table);
                        if (ret) {
                                kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
                                kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
                                return ret;
                        }
                }
                if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
                        ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
                                (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
                        if (clk_v->ucNumEntries) {
                                rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
                                        le16_to_cpu(clk_v->entries[0].usSclkLow) |
                                        (clk_v->entries[0].ucSclkHigh << 16);
                                rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
                                        le16_to_cpu(clk_v->entries[0].usMclkLow) |
                                        (clk_v->entries[0].ucMclkHigh << 16);
                                rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
                                        le16_to_cpu(clk_v->entries[0].usVddc);
                                rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
                                        le16_to_cpu(clk_v->entries[0].usVddci);
                        }
                }
                if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
                        ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
                                (ATOM_PPLIB_PhaseSheddingLimits_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));

                        rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
                                kzalloc(psl->ucNumEntries *
                                        sizeof(struct radeon_phase_shedding_limits_entry),
                                        GFP_KERNEL);
                        if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
                                kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
                                kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
                                kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
                                return -ENOMEM;
                        }

                        for (i = 0; i < psl->ucNumEntries; i++) {
                                rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
                                        le16_to_cpu(psl->entries[i].usSclkLow) |
                                        (psl->entries[i].ucSclkHigh << 16);
                                rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
                                        le16_to_cpu(psl->entries[i].usMclkLow) |
                                        (psl->entries[i].ucMclkHigh << 16);
                                rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
                                        le16_to_cpu(psl->entries[i].usVoltage);
                        }
                        rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
                                psl->ucNumEntries;
                }
        }

        /* cac data */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
                rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
                rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
                rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit;
                rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
                if (rdev->pm.dpm.tdp_od_limit)
                        rdev->pm.dpm.power_control = true;
                else
                        rdev->pm.dpm.power_control = false;
                rdev->pm.dpm.tdp_adjustment = 0;
                rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
                rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
                rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
                if (power_info->pplib5.usCACLeakageTableOffset) {
                        ATOM_PPLIB_CAC_Leakage_Table *cac_table =
                                (ATOM_PPLIB_CAC_Leakage_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
                        u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
                        rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
                        if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
                                kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
                                kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
                                kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
                                return -ENOMEM;
                        }
                        for (i = 0; i < cac_table->ucNumEntries; i++) {
                                rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
                                        le16_to_cpu(cac_table->entries[i].usVddc);
                                rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
                                        le32_to_cpu(cac_table->entries[i].ulLeakageValue);
                        }
                        rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
                }
        }

        /* ppm table */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
                ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
                        (mode_info->atom_context->bios + data_offset +
                         le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
                    ext_hdr->usPPMTableOffset) {
                        ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usPPMTableOffset));
                        rdev->pm.dpm.dyn_state.ppm_table =
                                kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
                        if (!rdev->pm.dpm.dyn_state.ppm_table) {
                                kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
                                kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
                                kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
                                kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
                                return -ENOMEM;
                        }
                        rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
                        rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
                                le16_to_cpu(ppm->usCpuCoreNumber);
                        rdev->pm.dpm.dyn_state.ppm_table->platform_tdp =
                                le32_to_cpu(ppm->ulPlatformTDP);
                        rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
                                le32_to_cpu(ppm->ulSmallACPlatformTDP);
                        rdev->pm.dpm.dyn_state.ppm_table->platform_tdc =
                                le32_to_cpu(ppm->ulPlatformTDC);
                        rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
                                le32_to_cpu(ppm->ulSmallACPlatformTDC);
                        rdev->pm.dpm.dyn_state.ppm_table->apu_tdp =
                                le32_to_cpu(ppm->ulApuTDP);
                        rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
                                le32_to_cpu(ppm->ulDGpuTDP);
                        rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
                                le32_to_cpu(ppm->ulDGpuUlvPower);
                        rdev->pm.dpm.dyn_state.ppm_table->tj_max =
                                le32_to_cpu(ppm->ulTjmax);
                }
        }

        return 0;
}

void r600_free_extended_power_table(struct radeon_device *rdev)
{
        if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries)
                kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
        if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries)
                kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
        if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries)
                kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
        if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries)
                kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
        if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
                kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries);
        if (rdev->pm.dpm.dyn_state.ppm_table)
                kfree(rdev->pm.dpm.dyn_state.ppm_table);
}

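/* Pick the PCIe gen to use for a state: an explicit asic_gen wins, otherwise
 * return default_gen when the system link-speed mask supports the matching
 * speed, else fall back to gen1.
 */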
enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
                                               u32 sys_mask,
                                               enum radeon_pcie_gen asic_gen,
                                               enum radeon_pcie_gen default_gen)
{
        switch (asic_gen) {
        case RADEON_PCIE_GEN1:
                return RADEON_PCIE_GEN1;
        case RADEON_PCIE_GEN2:
                return RADEON_PCIE_GEN2;
        case RADEON_PCIE_GEN3:
                return RADEON_PCIE_GEN3;
        default:
                if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
                        return RADEON_PCIE_GEN3;
                else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
                        return RADEON_PCIE_GEN2;
                else
                        return RADEON_PCIE_GEN1;
        }
        return RADEON_PCIE_GEN1;
}