/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
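
/*
 * The debug helpers below decode the ATOM PPLib state classification and
 * capability bits into the human-readable strings printed with the
 * power-state dumps.
 */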
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}

void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}

void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus:");
	if (rps == adev->pm.dpm.current_ps)
		pr_cont(" c");
	if (rps == adev->pm.dpm.requested_ps)
		pr_cont(" r");
	if (rps == adev->pm.dpm.boot_ps)
		pr_cont(" b");
	pr_cont("\n");
}
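
/*
 * Cache a bitmask and count of the currently enabled CRTCs; the DPM
 * state-selection code compares these against the previous values to
 * decide when the power state needs to be re-evaluated.
 */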
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	adev->pm.dpm.new_active_crtcs = 0;
	adev->pm.dpm.new_active_crtc_count = 0;
	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (amdgpu_crtc->enabled) {
				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
				adev->pm.dpm.new_active_crtc_count++;
			}
		}
	}
}
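
/*
 * Vblank time of the first enabled CRTC, in microseconds:
 * vblank_in_pixels = htotal * (vblank_end - vdisplay + 2 * v_border),
 * and with hw_mode.clock in kHz, time = pixels * 1000 / clock.
 * Returns 0xffffffff (effectively "unbounded") when no display is on,
 * which the dpm code treats as imposing no constraint on mclk switching.
 */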
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vblank_in_pixels =
					amdgpu_crtc->hw_mode.crtc_htotal *
					(amdgpu_crtc->hw_mode.crtc_vblank_end -
					 amdgpu_crtc->hw_mode.crtc_vdisplay +
					 (amdgpu_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				break;
			}
		}
	}

	return vrefresh;
}
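
/*
 * Split i_c = i * r_c / 100 into the (p, u) pair consumed by the
 * per-ASIC SMC setup code: u is derived from the bit width of i_c above
 * the 2^p_b base, and p = i_c / 4^u, so that p << (2 * u) ~= i_c.
 */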
void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			      u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}
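
/*
 * Compute the low/high thresholds (tl/th) the per-ASIC dpm code programs
 * around a target t, with hysteresis h; the split is weighted by
 * k = 100 * fh / fl, the percentage ratio of the high and low sclk
 * levels, using integer fixed-point math throughout.  Returns -EINVAL
 * for a zero or inverted frequency pair.
 */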
int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}

bool amdgpu_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}

bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};
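
/*
 * Convert one packed ATOM clock/voltage dependency table into the
 * driver's native layout.  ATOM records are byte-packed, so the cursor
 * advances by sizeof(record) in byte units instead of array indexing,
 * and the split 16+8 bit clock fields are recombined into a u32.
 */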
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}
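
/*
 * Read the platform capability flags and the backbias/voltage response
 * times from the PowerPlay table header shared by all pplib revisions.
 */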
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
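
/*
 * Parse the optional PowerPlay tables: the fan table, the clock/voltage
 * dependency and limit tables (pplib4), CAC leakage data (pplib5), and
 * the extended-header tables (VCE/UVD/SAMU/ACP limits, PPM, PowerTune,
 * vddgfx).  The "+ 1" offset adjustments below step over the one-byte
 * revision/count prefixes of the V2+ sub-tables.  Any allocation failure
 * unwinds prior allocations through amdgpu_free_extended_power_table().
 */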
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kcalloc(psl->ucNumEntries,
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
			ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			adev->pm.dpm.num_of_vce_states =
				states->numEntries > AMD_MAX_VCE_LEVELS ?
				AMD_MAX_VCE_LEVELS : states->numEntries;
			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
			ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
			ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
			ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
			ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
			ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}
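
/*
 * Release everything amdgpu_parse_extended_power_table() may have
 * allocated; safe on a partially parsed table since kfree(NULL) is a
 * no-op and unparsed entries are NULL in the zeroed dyn_state.
 */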
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};
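
/*
 * Register the thermal/fan controller described by the PowerPlay table.
 * Internal sensors only record their type for the dpm code; recognized
 * external I2C chips additionally get an i2c_board_info registered on
 * the VBIOS-specified bus so the matching hwmon driver can bind.
 */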
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
		    (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
			 (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
				 u16 asic_lanes,
				 u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}
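
/*
 * Encode a PCIE lane count (1/2/4/8/12/16) into the 3-bit format the
 * link-width register fields expect; any other count encodes to 0.
 */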
u8 amdgpu_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}
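
/* Bounds-checked accessor for the VCE states parsed from the PowerPlay table. */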
struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}