// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * amd-pstate.c - AMD Processor P-state Frequency Driver
 *
 * Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved.
 *
 * Author: Huang Rui <ray.huang@amd.com>
 *
 * AMD P-State introduces a new CPU performance scaling design for AMD
 * processors using the ACPI Collaborative Performance and Power Control (CPPC)
 * feature, which works with the AMD SMU firmware to provide a finer-grained
 * frequency control range. It replaces the legacy ACPI P-States control and
 * offers a flexible, low-latency interface for the Linux kernel to directly
 * communicate performance hints to the hardware.
 *
 * AMD P-State is supported on recent AMD Zen-based CPU series, including some
 * Zen2 and Zen3 processors. _CPC needs to be present in the ACPI tables of an
 * AMD P-State supported system. There are two types of hardware
 * implementations for AMD P-State: 1) the Full MSR Solution and 2) the Shared
 * Memory Solution. The X86_FEATURE_CPPC CPU feature flag is used to
 * distinguish between the two.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/static_call.h>

#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>

#define AMD_PSTATE_TRANSITION_LATENCY	0x20000
#define AMD_PSTATE_TRANSITION_DELAY	500

/*
 * TODO: We need more time, together with the community, to fine tune
 * processors using the shared memory solution.
 *
 * Suse reported some performance drops in CPU benchmarks, and we are working
 * with them to fine tune the shared memory solution. For now it is disabled
 * by default, so these processors fall back to acpi-cpufreq, and a module
 * parameter is provided to enable it manually for debugging.
 */
static bool shared_mem = false;
module_param(shared_mem, bool, 0444);
MODULE_PARM_DESC(shared_mem,
		 "enable amd-pstate on processors with shared memory solution (false = disabled (default), true = enabled)");

static struct cpufreq_driver amd_pstate_driver;

/**
 * struct amd_cpudata - private CPU data for AMD P-State
 * @cpu: CPU number
 * @cppc_req_cached: cached performance request hints
 * @highest_perf: the maximum performance an individual processor may reach,
 *		  assuming ideal conditions
 * @nominal_perf: the maximum sustained performance level of the processor,
 *		  assuming ideal operating conditions
 * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
 *			   savings are achieved
 * @lowest_perf: the absolute lowest performance level of the processor
 * @max_freq: the frequency that maps to highest_perf
 * @min_freq: the frequency that maps to lowest_perf
 * @nominal_freq: the frequency that maps to nominal_perf
 * @lowest_nonlinear_freq: the frequency that maps to lowest_nonlinear_perf
 *
 * The amd_cpudata is the key private data for each CPU thread in AMD P-State,
 * and it represents all the attributes and goals that AMD P-State requests at
 * runtime.
 */
struct amd_cpudata {
	int	cpu;

	u64	cppc_req_cached;

	u32	highest_perf;
	u32	nominal_perf;
	u32	lowest_nonlinear_perf;
	u32	lowest_perf;

	u32	max_freq;
	u32	min_freq;
	u32	nominal_freq;
	u32	lowest_nonlinear_freq;
};

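/* Enable/disable the AMD P-State feature on the full MSR solution. */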
static inline int pstate_enable(bool enable)
{
	return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable);
}

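/*
 * Enable/disable the AMD P-State feature on the shared memory solution by
 * asking the ACPI CPPC library to update the enable register of every
 * present CPU.
 */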
static int cppc_enable(bool enable)
{
	int cpu, ret = 0;

	for_each_present_cpu(cpu) {
		ret = cppc_set_enable(cpu, enable);
		if (ret)
			return ret;
	}

	return ret;
}

DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);

static inline int amd_pstate_enable(bool enable)
{
	return static_call(amd_pstate_enable)(enable);
}

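/*
 * Read the initial performance capabilities from the CPPC capability MSR.
 * The highest perf is taken from amd_get_highest_perf() instead, see the
 * TODO in the function body.
 */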
static int pstate_init_perf(struct amd_cpudata *cpudata)
{
	u64 cap1;

	int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
				     &cap1);
	if (ret)
		return ret;

	/*
	 * TODO: Introduce AMD specific power feature.
	 *
	 * CPPC entry doesn't indicate the highest performance in some ASICs.
	 */
	WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf());

	WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
	WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
	WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));

	return 0;
}

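/*
 * Shared memory counterpart of pstate_init_perf(): query the same
 * capabilities through the ACPI CPPC library instead of the MSR.
 */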
static int cppc_init_perf(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf());

	WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
	WRITE_ONCE(cpudata->lowest_nonlinear_perf,
		   cppc_perf.lowest_nonlinear_perf);
	WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);

	return 0;
}

DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);

static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
{
	return static_call(amd_pstate_init_perf)(cpudata);
}

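/*
 * Write the cached CPPC request to the request MSR. The min/des/max
 * arguments are unused here because amd_pstate_update() has already folded
 * them into cppc_req_cached; fast_switch selects a direct write on the
 * local CPU instead of a cross-CPU one.
 */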
static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
			       u32 des_perf, u32 max_perf, bool fast_switch)
{
	if (fast_switch)
		wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
	else
		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
			      READ_ONCE(cpudata->cppc_req_cached));
}

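/*
 * Shared memory counterpart of pstate_update_perf(): hand the min/desired/max
 * performance levels to the ACPI CPPC library, which forwards them to the
 * platform.
 */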
static void cppc_update_perf(struct amd_cpudata *cpudata,
			     u32 min_perf, u32 des_perf,
			     u32 max_perf, bool fast_switch)
{
	struct cppc_perf_ctrls perf_ctrls;

	perf_ctrls.max_perf = max_perf;
	perf_ctrls.min_perf = min_perf;
	perf_ctrls.desired_perf = des_perf;

	cppc_set_perf(cpudata->cpu, &perf_ctrls);
}

DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);

static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
					  u32 min_perf, u32 des_perf,
					  u32 max_perf, bool fast_switch)
{
	static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
					    max_perf, fast_switch);
}

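/*
 * Fold the min/desired/max performance levels into the cached CPPC request
 * value and push it to the hardware, skipping the update when nothing has
 * changed.
 */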
static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
			      u32 des_perf, u32 max_perf, bool fast_switch)
{
	u64 prev = READ_ONCE(cpudata->cppc_req_cached);
	u64 value = prev;

	value &= ~AMD_CPPC_MIN_PERF(~0L);
	value |= AMD_CPPC_MIN_PERF(min_perf);

	value &= ~AMD_CPPC_DES_PERF(~0L);
	value |= AMD_CPPC_DES_PERF(des_perf);

	value &= ~AMD_CPPC_MAX_PERF(~0L);
	value |= AMD_CPPC_MAX_PERF(max_perf);

	if (value == prev)
		return;

	WRITE_ONCE(cpudata->cppc_req_cached, value);

	amd_pstate_update_perf(cpudata, min_perf, des_perf,
			       max_perf, fast_switch);
}

static int amd_pstate_verify(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	return 0;
}

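/*
 * ->target() callback: translate the requested frequency into a desired
 * performance level, scaled so that max_freq corresponds to highest_perf,
 * and apply it within the cpufreq transition notifiers.
 */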
static int amd_pstate_target(struct cpufreq_policy *policy,
			     unsigned int target_freq,
			     unsigned int relation)
{
	struct cpufreq_freqs freqs;
	struct amd_cpudata *cpudata = policy->driver_data;
	unsigned long max_perf, min_perf, des_perf, cap_perf;

	if (!cpudata->max_freq)
		return -ENODEV;

	cap_perf = READ_ONCE(cpudata->highest_perf);
	min_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
	max_perf = cap_perf;

	freqs.old = policy->cur;
	freqs.new = target_freq;

	des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
				     cpudata->max_freq);

	cpufreq_freq_transition_begin(policy, &freqs);
	amd_pstate_update(cpudata, min_perf, des_perf,
			  max_perf, false);
	cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}

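/*
 * ->adjust_perf() fast path: the scheduler passes capacity-relative hints,
 * which are rescaled to the CPPC performance range, with min_perf raised to
 * at least lowest_nonlinear_perf, and applied with fast_switch semantics.
 */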
static void amd_pstate_adjust_perf(unsigned int cpu,
				   unsigned long _min_perf,
				   unsigned long target_perf,
				   unsigned long capacity)
{
	unsigned long max_perf, min_perf, des_perf,
		      cap_perf, lowest_nonlinear_perf;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct amd_cpudata *cpudata = policy->driver_data;

	cap_perf = READ_ONCE(cpudata->highest_perf);
	lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);

	des_perf = cap_perf;
	if (target_perf < capacity)
		des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);

	min_perf = READ_ONCE(cpudata->highest_perf);
	if (_min_perf < capacity)
		min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);

	if (min_perf < lowest_nonlinear_perf)
		min_perf = lowest_nonlinear_perf;

	max_perf = cap_perf;
	if (max_perf < min_perf)
		max_perf = min_perf;

	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);

	amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
}

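/*
 * The amd_get_{min,max,nominal,lowest_nonlinear}_freq() helpers below derive
 * the frequency limits in kHz from the ACPI CPPC capabilities, which report
 * frequencies in MHz.
 */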
static int amd_get_min_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	/* Switch to khz */
	return cppc_perf.lowest_freq * 1000;
}

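/*
 * The maximum frequency is not reported directly: scale nominal_freq by the
 * highest_perf/nominal_perf boost ratio, computed in fixed point with
 * SCHED_CAPACITY_SHIFT fractional bits.
 */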
static int amd_get_max_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;
	u32 max_perf, max_freq, nominal_freq, nominal_perf;
	u64 boost_ratio;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	nominal_freq = cppc_perf.nominal_freq;
	nominal_perf = READ_ONCE(cpudata->nominal_perf);
	max_perf = READ_ONCE(cpudata->highest_perf);

	boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT,
			      nominal_perf);

	max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT;

	/* Switch to khz */
	return max_freq * 1000;
}

static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	/* Switch to khz */
	return cppc_perf.nominal_freq * 1000;
}

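/*
 * Likewise, derive the lowest nonlinear frequency from nominal_freq and the
 * lowest_nonlinear_perf/nominal_perf ratio.
 */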
static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;
	u32 lowest_nonlinear_freq, lowest_nonlinear_perf,
	    nominal_freq, nominal_perf;
	u64 lowest_nonlinear_ratio;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	nominal_freq = cppc_perf.nominal_freq;
	nominal_perf = READ_ONCE(cpudata->nominal_perf);

	lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;

	lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
					 nominal_perf);

	lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT;

	/* Switch to khz */
	return lowest_nonlinear_freq * 1000;
}

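/*
 * ->init() callback: allocate the per-CPU amd_cpudata, query the performance
 * capabilities and frequency limits, and seed the cpufreq policy. Fast
 * frequency switching is only advertised on the full MSR solution.
 */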
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
	struct device *dev;
	struct amd_cpudata *cpudata;

	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
	if (!cpudata)
		return -ENOMEM;

	cpudata->cpu = policy->cpu;

	ret = amd_pstate_init_perf(cpudata);
	if (ret)
		goto free_cpudata;

	min_freq = amd_get_min_freq(cpudata);
	max_freq = amd_get_max_freq(cpudata);
	nominal_freq = amd_get_nominal_freq(cpudata);
	lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);

	if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
		dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
			min_freq, max_freq);
		ret = -EINVAL;
		goto free_cpudata;
	}

	policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
	policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;

	policy->min = min_freq;
	policy->max = max_freq;

	policy->cpuinfo.min_freq = min_freq;
	policy->cpuinfo.max_freq = max_freq;

	/* It will be updated by governor */
	policy->cur = policy->cpuinfo.min_freq;

	if (boot_cpu_has(X86_FEATURE_CPPC))
		policy->fast_switch_possible = true;

	/* Initial processor data capability frequencies */
	cpudata->max_freq = max_freq;
	cpudata->min_freq = min_freq;
	cpudata->nominal_freq = nominal_freq;
	cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;

	policy->driver_data = cpudata;

	return 0;

free_cpudata:
	kfree(cpudata);
	return ret;
}

static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata;

	cpudata = policy->driver_data;

	kfree(cpudata);

	return 0;
}

static struct cpufreq_driver amd_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
	.verify		= amd_pstate_verify,
	.target		= amd_pstate_target,
	.init		= amd_pstate_cpu_init,
	.exit		= amd_pstate_cpu_exit,
	.name		= "amd-pstate",
};

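/*
 * Module entry point: require an AMD CPU with valid _CPC objects, pick the
 * full MSR or shared memory callbacks (the latter only when the shared_mem
 * parameter is set), enable the CPPC feature and register the driver.
 */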
static int __init amd_pstate_init(void)
{
	int ret;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return -ENODEV;

	if (!acpi_cpc_valid()) {
		pr_debug("the _CPC object is not present in SBIOS\n");
		return -ENODEV;
	}

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -EEXIST;

	/* capability check */
	if (boot_cpu_has(X86_FEATURE_CPPC)) {
		pr_debug("AMD CPPC MSR based functionality is supported\n");
		amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf;
	} else if (shared_mem) {
		static_call_update(amd_pstate_enable, cppc_enable);
		static_call_update(amd_pstate_init_perf, cppc_init_perf);
		static_call_update(amd_pstate_update_perf, cppc_update_perf);
	} else {
		pr_info("This processor supports shared memory solution, you can enable it with amd_pstate.shared_mem=1\n");
		return -ENODEV;
	}

	/* enable amd pstate feature */
	ret = amd_pstate_enable(true);
	if (ret) {
		pr_err("failed to enable amd-pstate with return %d\n", ret);
		return ret;
	}

	ret = cpufreq_register_driver(&amd_pstate_driver);
	if (ret)
		pr_err("failed to register amd_pstate_driver with return %d\n",
		       ret);

	return ret;
}

static void __exit amd_pstate_exit(void)
{
	cpufreq_unregister_driver(&amd_pstate_driver);

	amd_pstate_enable(false);
}

module_init(amd_pstate_init);
module_exit(amd_pstate_exit);

MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
MODULE_LICENSE("GPL");