// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AMD Processor P-state Frequency Driver Unit Test
 *
 * Copyright (C) 2022 Advanced Micro Devices, Inc. All Rights Reserved.
 *
 * Author: Meng Li <li.meng@amd.com>
 *
 * The AMD P-State Unit Test is a test module for the amd-pstate driver.
 * 1) It helps users verify that their processor (SBIOS/firmware or
 * hardware) supports the driver. 2) It gives the kernel a basic
 * functional test to catch regressions across updates. 3) Further
 * functional or performance tests can be added later to compare
 * results, which benefits power and performance scaling optimization.
 *
 * This driver implements a basic framework, with plans to enhance it
 * with additional test cases to improve the depth and coverage of the
 * test.
 *
 * See the "Unit Tests for amd-pstate" section of
 * Documentation/admin-guide/pm/amd-pstate.rst for more detail.
 */
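
/*
 * The test cases run once, at module load time. Loading this module
 * (e.g. "modprobe amd-pstate-ut", assuming it is built as a module)
 * runs every case and reports each result in the kernel log.
 */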

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>

#include <acpi/cppc_acpi.h>

#include "amd-pstate.h"

/*
 * Abbreviations:
 * amd_pstate_ut: used as a short form for the AMD P-State unit test.
 * It keeps variable names shorter and simpler.
 */
enum amd_pstate_ut_result {
	AMD_PSTATE_UT_RESULT_PASS,
	AMD_PSTATE_UT_RESULT_FAIL,
};

struct amd_pstate_ut_struct {
	const char *name;
	void (*func)(u32 index);
	enum amd_pstate_ut_result result;
};

/*
 * Forward declarations of the individual test cases.
 */
static void amd_pstate_ut_acpi_cpc_valid(u32 index);
static void amd_pstate_ut_check_enabled(u32 index);
static void amd_pstate_ut_check_perf(u32 index);
static void amd_pstate_ut_check_freq(u32 index);

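/*
 * Table of test cases run, in order, by amd_pstate_ut_init().
 */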
static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = {
	{"amd_pstate_ut_acpi_cpc_valid", amd_pstate_ut_acpi_cpc_valid },
	{"amd_pstate_ut_check_enabled", amd_pstate_ut_check_enabled },
	{"amd_pstate_ut_check_perf", amd_pstate_ut_check_perf },
	{"amd_pstate_ut_check_freq", amd_pstate_ut_check_freq }
};

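/*
 * Return true when the CPU lacks the CPPC MSR interface
 * (X86_FEATURE_CPPC), i.e. the driver uses the shared memory
 * (ACPI CPPC) interface instead of dedicated MSRs.
 */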
static bool get_shared_mem(void)
{
	bool result = false;

	if (!boot_cpu_has(X86_FEATURE_CPPC))
		result = true;

	return result;
}

/*
 * Check that the _CPC object is present in the SBIOS.
 */
static void amd_pstate_ut_acpi_cpc_valid(u32 index)
{
	if (acpi_cpc_valid())
		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
	else {
		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
		pr_err("%s the _CPC object is not present in SBIOS!\n", __func__);
	}
}

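/*
 * Read MSR_AMD_CPPC_ENABLE and verify that CPPC has been enabled
 * (the register reads back non-zero).
 */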
static void amd_pstate_ut_pstate_enable(u32 index)
{
	int ret = 0;
	u64 cppc_enable = 0;

	ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
	if (ret) {
		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
		pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
		return;
	}
	if (cppc_enable)
		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
	else {
		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
		pr_err("%s amd pstate must be enabled!\n", __func__);
	}
}

/*
 * Check if amd-pstate is enabled.
 */
static void amd_pstate_ut_check_enabled(u32 index)
{
	if (get_shared_mem())
		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
	else
		amd_pstate_ut_pstate_enable(index);
}

/*
 * Check if performance values are reasonable.
 * highest_perf >= nominal_perf > lowest_nonlinear_perf > lowest_perf > 0
 */
static void amd_pstate_ut_check_perf(u32 index)
{
	int cpu = 0, ret = 0;
	u32 highest_perf = 0, nominal_perf = 0, lowest_nonlinear_perf = 0, lowest_perf = 0;
	u64 cap1 = 0;
	struct cppc_perf_caps cppc_perf;
	struct cpufreq_policy *policy = NULL;
	struct amd_cpudata *cpudata = NULL;

	for_each_possible_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			break;
		cpudata = policy->driver_data;

		if (get_shared_mem()) {
			ret = cppc_get_perf_caps(cpu, &cppc_perf);
			if (ret) {
				amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
				pr_err("%s cppc_get_perf_caps ret=%d error!\n", __func__, ret);
				goto skip_test;
			}

			highest_perf = cppc_perf.highest_perf;
			nominal_perf = cppc_perf.nominal_perf;
			lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
			lowest_perf = cppc_perf.lowest_perf;
		} else {
			ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
			if (ret) {
				amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
				pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
				goto skip_test;
			}

			highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
			nominal_perf = AMD_CPPC_NOMINAL_PERF(cap1);
			lowest_nonlinear_perf = AMD_CPPC_LOWNONLIN_PERF(cap1);
			lowest_perf = AMD_CPPC_LOWEST_PERF(cap1);
		}

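		/*
		 * With preferred core support (hw_prefcore), the cached
		 * highest_perf may legitimately differ from the value read
		 * above, so the comparison is only enforced without it.
		 */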
		if (highest_perf != READ_ONCE(cpudata->highest_perf) && !cpudata->hw_prefcore) {
			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
			pr_err("%s cpu%d highest=%d %d highest perf doesn't match\n",
				__func__, cpu, highest_perf, cpudata->highest_perf);
			goto skip_test;
		}
		if ((nominal_perf != READ_ONCE(cpudata->nominal_perf)) ||
		    (lowest_nonlinear_perf != READ_ONCE(cpudata->lowest_nonlinear_perf)) ||
		    (lowest_perf != READ_ONCE(cpudata->lowest_perf))) {
			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
			pr_err("%s cpu%d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n",
				__func__, cpu, nominal_perf, cpudata->nominal_perf,
				lowest_nonlinear_perf, cpudata->lowest_nonlinear_perf,
				lowest_perf, cpudata->lowest_perf);
			goto skip_test;
		}

		if (!((highest_perf >= nominal_perf) &&
		      (nominal_perf > lowest_nonlinear_perf) &&
		      (lowest_nonlinear_perf > lowest_perf) &&
		      (lowest_perf > 0))) {
			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
			pr_err("%s cpu%d highest=%d >= nominal=%d > lowest_nonlinear=%d > lowest=%d > 0, the formula is incorrect!\n",
				__func__, cpu, highest_perf, nominal_perf,
				lowest_nonlinear_perf, lowest_perf);
			goto skip_test;
		}
		cpufreq_cpu_put(policy);
	}

	amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
	return;
skip_test:
	cpufreq_cpu_put(policy);
}

/*
 * Check if frequency values are reasonable.
 * max_freq >= nominal_freq > lowest_nonlinear_freq > min_freq > 0
 * Also check the max frequency when boost mode is supported.
 */
static void amd_pstate_ut_check_freq(u32 index)
{
	int cpu = 0;
	struct cpufreq_policy *policy = NULL;
	struct amd_cpudata *cpudata = NULL;
	u32 nominal_freq_khz;

	for_each_possible_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			break;
		cpudata = policy->driver_data;

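		/*
		 * nominal_freq is kept in MHz (hence the * 1000), unlike the
		 * other frequency fields, which are in kHz.
		 */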
		nominal_freq_khz = cpudata->nominal_freq * 1000;
		if (!((cpudata->max_freq >= nominal_freq_khz) &&
		      (nominal_freq_khz > cpudata->lowest_nonlinear_freq) &&
		      (cpudata->lowest_nonlinear_freq > cpudata->min_freq) &&
		      (cpudata->min_freq > 0))) {
			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
			pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
				__func__, cpu, cpudata->max_freq, nominal_freq_khz,
				cpudata->lowest_nonlinear_freq, cpudata->min_freq);
			goto skip_test;
		}

		if (cpudata->min_freq != policy->min) {
			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
			pr_err("%s cpu%d cpudata_min_freq=%d policy_min=%d, they should be equal!\n",
				__func__, cpu, cpudata->min_freq, policy->min);
			goto skip_test;
		}

		if (cpudata->boost_supported) {
			if ((policy->max == cpudata->max_freq) ||
			    (policy->max == nominal_freq_khz))
				amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
			else {
				amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
				pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d!\n",
					__func__, cpu, policy->max, cpudata->max_freq,
					nominal_freq_khz);
				goto skip_test;
			}
		} else {
			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
			pr_err("%s cpu%d must support boost!\n", __func__, cpu);
			goto skip_test;
		}
		cpufreq_cpu_put(policy);
	}

	amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
	return;
skip_test:
	cpufreq_cpu_put(policy);
}

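/*
 * Run every test case in amd_pstate_ut_cases[] and report the result of
 * each one in the kernel log.
 */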
static int __init amd_pstate_ut_init(void)
{
	u32 i = 0, arr_size = ARRAY_SIZE(amd_pstate_ut_cases);

	for (i = 0; i < arr_size; i++) {
		amd_pstate_ut_cases[i].func(i);
		switch (amd_pstate_ut_cases[i].result) {
		case AMD_PSTATE_UT_RESULT_PASS:
			pr_info("%-4d %-20s\t success!\n", i + 1, amd_pstate_ut_cases[i].name);
			break;
		case AMD_PSTATE_UT_RESULT_FAIL:
		default:
			pr_info("%-4d %-20s\t fail!\n", i + 1, amd_pstate_ut_cases[i].name);
			break;
		}
	}

	return 0;
}

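/*
 * Nothing to clean up; the tests only run once, at module load time.
 */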
static void __exit amd_pstate_ut_exit(void)
{
}

module_init(amd_pstate_ut_init);
module_exit(amd_pstate_ut_exit);

MODULE_AUTHOR("Meng Li <li.meng@amd.com>");
MODULE_DESCRIPTION("AMD P-state driver Test module");
MODULE_LICENSE("GPL");