drivers/cpufreq/amd-pstate.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * amd-pstate.c - AMD Processor P-state Frequency Driver
 *
 * Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved.
 *
 * Author: Huang Rui <ray.huang@amd.com>
 *
 * AMD P-State introduces a new CPU performance scaling design for AMD
 * processors using the ACPI Collaborative Performance and Power Control (CPPC)
 * feature, which works with the AMD SMU firmware to provide a finer grained
 * frequency control range. It replaces the legacy ACPI P-States control and
 * allows a flexible, low-latency interface for the Linux kernel to directly
 * communicate performance hints to the hardware.
 *
 * AMD P-State is supported on recent AMD Zen-based CPU series, including some
 * Zen2 and Zen3 processors. _CPC needs to be present in the ACPI tables of an
 * AMD P-State supported system, and there are two types of hardware
 * implementations for AMD P-State: 1) Full MSR Solution and 2) Shared Memory
 * Solution. The X86_FEATURE_CPPC CPU feature flag is used to distinguish
 * between the two.
 */
22
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/smp.h>
29#include <linux/sched.h>
30#include <linux/cpufreq.h>
31#include <linux/compiler.h>
32#include <linux/dmi.h>
33#include <linux/slab.h>
34#include <linux/acpi.h>
35#include <linux/io.h>
36#include <linux/delay.h>
37#include <linux/uaccess.h>
38#include <linux/static_call.h>
f1375ec1 39#include <linux/amd-pstate.h>
f3a05239 40#include <linux/topology.h>
41
42#include <acpi/processor.h>
43#include <acpi/cppc_acpi.h>
44
45#include <asm/msr.h>
46#include <asm/processor.h>
47#include <asm/cpufeature.h>
48#include <asm/cpu_device_id.h>
60e10f89 49#include "amd-pstate-trace.h"
ec437d71 50
51#define AMD_PSTATE_TRANSITION_LATENCY 20000
52#define AMD_PSTATE_TRANSITION_DELAY 1000
53#define CPPC_HIGHEST_PERF_PERFORMANCE 196
54#define CPPC_HIGHEST_PERF_DEFAULT 166
ec437d71 55
/*
 * TODO: We need more time, together with the community, to fine tune the
 * processors that use the shared memory solution.
 *
 * SUSE reported some performance drops in CPU benchmarks; we are working with
 * them to fine tune the shared memory solution. So it is disabled by default,
 * falling back to acpi-cpufreq on these processors, and a module parameter is
 * provided so it can be enabled manually for debugging.
 */
ffa5096a 65static struct cpufreq_driver *current_pstate_driver;
ec437d71 66static struct cpufreq_driver amd_pstate_driver;
ffa5096a 67static struct cpufreq_driver amd_pstate_epp_driver;
c88ad30e 68static int cppc_state = AMD_PSTATE_UNDEFINED;
217e6778 69static bool cppc_enabled;
f3a05239 70static bool amd_pstate_prefcore = true;
eb8b6c36 71static struct quirk_entry *quirks;
36c5014e 72
/*
 * AMD Energy Preference Performance (EPP)
 * The EPP is used in the CCLK DPM controller to drive
 * the frequency that a core is going to operate during
 * short periods of activity. EPP values will be utilized for
 * different OS profiles (balanced, performance, power savings).
 * The strings below correspond to the EPP index in
 * energy_perf_strings[]:
 * index		String
 *-------------------------------------
 * 0			default
 * 1			performance
 * 2			balance_performance
 * 3			balance_power
 * 4			power
 */
89enum energy_perf_value_index {
90 EPP_INDEX_DEFAULT = 0,
91 EPP_INDEX_PERFORMANCE,
92 EPP_INDEX_BALANCE_PERFORMANCE,
93 EPP_INDEX_BALANCE_POWERSAVE,
94 EPP_INDEX_POWERSAVE,
95};
96
97static const char * const energy_perf_strings[] = {
98 [EPP_INDEX_DEFAULT] = "default",
99 [EPP_INDEX_PERFORMANCE] = "performance",
100 [EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
101 [EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
102 [EPP_INDEX_POWERSAVE] = "power",
103 NULL
104};
105
106static unsigned int epp_values[] = {
107 [EPP_INDEX_DEFAULT] = 0,
108 [EPP_INDEX_PERFORMANCE] = AMD_CPPC_EPP_PERFORMANCE,
109 [EPP_INDEX_BALANCE_PERFORMANCE] = AMD_CPPC_EPP_BALANCE_PERFORMANCE,
110 [EPP_INDEX_BALANCE_POWERSAVE] = AMD_CPPC_EPP_BALANCE_POWERSAVE,
111 [EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
112 };
113
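/*
 * Signature shared by the mode transition handlers referenced from the
 * mode_state_machine[] table further down: each handler takes the target
 * cppc_state mode index and returns 0 on success or a negative errno.
 */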
114typedef int (*cppc_mode_transition_fn)(int);
115
116static struct quirk_entry quirk_amd_7k62 = {
117 .nominal_freq = 2600,
118 .lowest_freq = 550,
119};
120
121static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
122{
	/*
	 * Match the broken BIOS for family 17h processors that support CPPC V2:
	 * these BIOSes lack the nominal_freq and lowest_freq capability
	 * definitions in the ACPI tables.
	 */
128 if (boot_cpu_has(X86_FEATURE_ZEN2)) {
129 quirks = dmi->driver_data;
130 pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident);
131 return 1;
132 }
133
134 return 0;
135}
136
137static const struct dmi_system_id amd_pstate_quirks_table[] __initconst = {
138 {
139 .callback = dmi_matched_7k62_bios_bug,
140 .ident = "AMD EPYC 7K62",
141 .matches = {
142 DMI_MATCH(DMI_BIOS_VERSION, "5.14"),
143 DMI_MATCH(DMI_BIOS_RELEASE, "12/12/2019"),
144 },
145 .driver_data = &quirk_amd_7k62,
146 },
147 {}
148};
149MODULE_DEVICE_TABLE(dmi, amd_pstate_quirks_table);
150
151static inline int get_mode_idx_from_str(const char *str, size_t size)
152{
153 int i;
154
155 for (i=0; i < AMD_PSTATE_MAX; i++) {
156 if (!strncmp(str, amd_pstate_mode_string[i], size))
157 return i;
158 }
159 return -EINVAL;
160}
ec437d71 161
162static DEFINE_MUTEX(amd_pstate_limits_lock);
163static DEFINE_MUTEX(amd_pstate_driver_lock);
164
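/*
 * Read the current EPP value: on full MSR systems it is extracted from the
 * (cached) MSR_AMD_CPPC_REQ register, on shared memory systems it is queried
 * through the ACPI CPPC interface.
 */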
165static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
166{
167 u64 epp;
168 int ret;
169
170 if (boot_cpu_has(X86_FEATURE_CPPC)) {
171 if (!cppc_req_cached) {
172 epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
173 &cppc_req_cached);
174 if (epp)
175 return epp;
176 }
177 epp = (cppc_req_cached >> 24) & 0xFF;
178 } else {
179 ret = cppc_get_epp_perf(cpudata->cpu, &epp);
180 if (ret < 0) {
181 pr_debug("Could not retrieve energy perf value (%d)\n", ret);
182 return -EIO;
183 }
184 }
185
186 return (s16)(epp & 0xff);
187}
188
189static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
190{
191 s16 epp;
192 int index = -EINVAL;
193
194 epp = amd_pstate_get_epp(cpudata, 0);
195 if (epp < 0)
196 return epp;
197
198 switch (epp) {
199 case AMD_CPPC_EPP_PERFORMANCE:
200 index = EPP_INDEX_PERFORMANCE;
201 break;
202 case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
203 index = EPP_INDEX_BALANCE_PERFORMANCE;
204 break;
205 case AMD_CPPC_EPP_BALANCE_POWERSAVE:
206 index = EPP_INDEX_BALANCE_POWERSAVE;
207 break;
208 case AMD_CPPC_EPP_POWERSAVE:
209 index = EPP_INDEX_POWERSAVE;
210 break;
211 default:
212 break;
213 }
214
215 return index;
216}
217
218static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
219{
220 int ret;
221 struct cppc_perf_ctrls perf_ctrls;
222
223 if (boot_cpu_has(X86_FEATURE_CPPC)) {
224 u64 value = READ_ONCE(cpudata->cppc_req_cached);
225
226 value &= ~GENMASK_ULL(31, 24);
227 value |= (u64)epp << 24;
228 WRITE_ONCE(cpudata->cppc_req_cached, value);
229
230 ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
231 if (!ret)
232 cpudata->epp_cached = epp;
233 } else {
234 perf_ctrls.energy_perf = epp;
235 ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
236 if (ret) {
237 pr_debug("failed to set energy perf value (%d)\n", ret);
238 return ret;
239 }
240 cpudata->epp_cached = epp;
241 }
242
243 return ret;
244}
245
246static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
247 int pref_index)
248{
249 int epp = -EINVAL;
250 int ret;
251
252 if (!pref_index) {
253 pr_debug("EPP pref_index is invalid\n");
254 return -EINVAL;
255 }
256
257 if (epp == -EINVAL)
258 epp = epp_values[pref_index];
259
260 if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
261 pr_debug("EPP cannot be set under performance policy\n");
262 return -EBUSY;
263 }
264
265 ret = amd_pstate_set_epp(cpudata, epp);
266
267 return ret;
268}
269
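/*
 * MSR based systems: toggle MSR_AMD_CPPC_ENABLE once per logical die; the
 * logical_proc_id_mask tracks which dies have already been written.
 */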
e059c184 270static inline int pstate_enable(bool enable)
ec437d71 271{
272 int ret, cpu;
273 unsigned long logical_proc_id_mask = 0;
274
275 if (enable == cppc_enabled)
276 return 0;
277
278 for_each_present_cpu(cpu) {
279 unsigned long logical_id = topology_logical_die_id(cpu);
280
281 if (test_bit(logical_id, &logical_proc_id_mask))
282 continue;
283
284 set_bit(logical_id, &logical_proc_id_mask);
285
286 ret = wrmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_ENABLE,
287 enable);
288 if (ret)
289 return ret;
290 }
291
292 cppc_enabled = enable;
293 return 0;
294}
295
296static int cppc_enable(bool enable)
297{
298 int cpu, ret = 0;
ffa5096a 299 struct cppc_perf_ctrls perf_ctrls;
e059c184 300
301 if (enable == cppc_enabled)
302 return 0;
303
304 for_each_present_cpu(cpu) {
305 ret = cppc_set_enable(cpu, enable);
306 if (ret)
307 return ret;
308
309 /* Enable autonomous mode for EPP */
310 if (cppc_state == AMD_PSTATE_ACTIVE) {
311 /* Set desired perf as zero to allow EPP firmware control */
312 perf_ctrls.desired_perf = 0;
313 ret = cppc_set_perf(cpu, &perf_ctrls);
314 if (ret)
315 return ret;
316 }
317 }
318
217e6778 319 cppc_enabled = enable;
320 return ret;
321}
322
323DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);
324
325static inline int amd_pstate_enable(bool enable)
326{
327 return static_call(amd_pstate_enable)(enable);
328}
329
330static u32 amd_pstate_highest_perf_set(struct amd_cpudata *cpudata)
331{
332 struct cpuinfo_x86 *c = &cpu_data(0);
333
334 /*
335 * For AMD CPUs with Family ID 19H and Model ID range 0x70 to 0x7f,
336 * the highest performance level is set to 196.
337 * https://bugzilla.kernel.org/show_bug.cgi?id=218759
338 */
339 if (c->x86 == 0x19 && (c->x86_model >= 0x70 && c->x86_model <= 0x7f))
340 return CPPC_HIGHEST_PERF_PERFORMANCE;
341
342 return CPPC_HIGHEST_PERF_DEFAULT;
343}
344
e059c184 345static int pstate_init_perf(struct amd_cpudata *cpudata)
346{
347 u64 cap1;
bedadcfb 348 u32 highest_perf;
349
350 int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
351 &cap1);
352 if (ret)
353 return ret;
354
	/*
	 * For platforms that do not support the preferred core feature,
	 * highest_perf may be configured as 166 or 255; to avoid the max
	 * frequency being calculated wrongly, take the
	 * AMD_CPPC_HIGHEST_PERF(cap1) value as the default max perf.
	 */
f3a05239 360 if (cpudata->hw_prefcore)
bf202e65 361 highest_perf = amd_pstate_highest_perf_set(cpudata);
f3a05239 362 else
363 highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
364
365 WRITE_ONCE(cpudata->highest_perf, highest_perf);
febab20c 366 WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
367 WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
368 WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
369 WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
e571a5e2 370 WRITE_ONCE(cpudata->prefcore_ranking, AMD_CPPC_HIGHEST_PERF(cap1));
febab20c 371 WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
372 return 0;
373}
374
375static int cppc_init_perf(struct amd_cpudata *cpudata)
376{
377 struct cppc_perf_caps cppc_perf;
bedadcfb 378 u32 highest_perf;
379
380 int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
381 if (ret)
382 return ret;
383
f3a05239 384 if (cpudata->hw_prefcore)
bf202e65 385 highest_perf = amd_pstate_highest_perf_set(cpudata);
f3a05239 386 else
387 highest_perf = cppc_perf.highest_perf;
388
389 WRITE_ONCE(cpudata->highest_perf, highest_perf);
febab20c 390 WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
391 WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
392 WRITE_ONCE(cpudata->lowest_nonlinear_perf,
393 cppc_perf.lowest_nonlinear_perf);
394 WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
e571a5e2 395 WRITE_ONCE(cpudata->prefcore_ranking, cppc_perf.highest_perf);
febab20c 396 WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
e059c184 397
398 if (cppc_state == AMD_PSTATE_ACTIVE)
399 return 0;
400
401 ret = cppc_get_auto_sel_caps(cpudata->cpu, &cppc_perf);
402 if (ret) {
403 pr_warn("failed to get auto_sel, ret: %d\n", ret);
404 return 0;
405 }
406
407 ret = cppc_set_auto_sel(cpudata->cpu,
408 (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
409
410 if (ret)
411 pr_warn("failed to set auto_sel, ret: %d\n", ret);
412
413 return ret;
414}
415
416DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);
417
418static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
419{
420 return static_call(amd_pstate_init_perf)(cpudata);
421}
422
423static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
424 u32 des_perf, u32 max_perf, bool fast_switch)
425{
426 if (fast_switch)
427 wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
428 else
429 wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
430 READ_ONCE(cpudata->cppc_req_cached));
431}
432
433static void cppc_update_perf(struct amd_cpudata *cpudata,
434 u32 min_perf, u32 des_perf,
435 u32 max_perf, bool fast_switch)
436{
437 struct cppc_perf_ctrls perf_ctrls;
438
439 perf_ctrls.max_perf = max_perf;
440 perf_ctrls.min_perf = min_perf;
441 perf_ctrls.desired_perf = des_perf;
442
443 cppc_set_perf(cpudata->cpu, &perf_ctrls);
444}
445
446DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
447
448static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
449 u32 min_perf, u32 des_perf,
450 u32 max_perf, bool fast_switch)
451{
452 static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
453 max_perf, fast_switch);
454}
455
456static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
457{
458 u64 aperf, mperf, tsc;
459 unsigned long flags;
460
461 local_irq_save(flags);
462 rdmsrl(MSR_IA32_APERF, aperf);
463 rdmsrl(MSR_IA32_MPERF, mperf);
464 tsc = rdtsc();
465
466 if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
467 local_irq_restore(flags);
468 return false;
469 }
470
471 local_irq_restore(flags);
472
473 cpudata->cur.aperf = aperf;
474 cpudata->cur.mperf = mperf;
475 cpudata->cur.tsc = tsc;
476 cpudata->cur.aperf -= cpudata->prev.aperf;
477 cpudata->cur.mperf -= cpudata->prev.mperf;
478 cpudata->cur.tsc -= cpudata->prev.tsc;
479
480 cpudata->prev.aperf = aperf;
481 cpudata->prev.mperf = mperf;
482 cpudata->prev.tsc = tsc;
483
484 cpudata->freq = div64_u64((cpudata->cur.aperf * cpu_khz), cpudata->cur.mperf);
485
486 return true;
487}
488
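/*
 * Clamp the requested min/des/max perf values against the current policy
 * limits, fold them into the cached CPPC request word and, if anything
 * changed, push the new request to the hardware.
 */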
ec437d71 489static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
2dd6d0eb 490 u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags)
491{
492 u64 prev = READ_ONCE(cpudata->cppc_req_cached);
493 u64 value = prev;
494
495 min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
496 cpudata->max_limit_perf);
497 max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
498 cpudata->max_limit_perf);
0e9a8638 499 des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
500
501 if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
502 min_perf = des_perf;
503 des_perf = 0;
504 }
505
506 value &= ~AMD_CPPC_MIN_PERF(~0L);
507 value |= AMD_CPPC_MIN_PERF(min_perf);
508
509 value &= ~AMD_CPPC_DES_PERF(~0L);
510 value |= AMD_CPPC_DES_PERF(des_perf);
511
512 value &= ~AMD_CPPC_MAX_PERF(~0L);
513 value |= AMD_CPPC_MAX_PERF(max_perf);
514
515 if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
516 trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
517 cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
518 cpudata->cpu, (value != prev), fast_switch);
519 }
60e10f89 520
521 if (value == prev)
522 return;
523
524 WRITE_ONCE(cpudata->cppc_req_cached, value);
525
526 amd_pstate_update_perf(cpudata, min_perf, des_perf,
527 max_perf, fast_switch);
528}
529
530static int amd_pstate_verify(struct cpufreq_policy_data *policy)
531{
532 cpufreq_verify_within_cpu_limits(policy);
533
534 return 0;
535}
536
537static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
538{
8164f743 539 u32 max_limit_perf, min_limit_perf, lowest_perf;
540 struct amd_cpudata *cpudata = policy->driver_data;
541
542 max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
543 min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
544
545 lowest_perf = READ_ONCE(cpudata->lowest_perf);
546 if (min_limit_perf < lowest_perf)
547 min_limit_perf = lowest_perf;
548
549 if (max_limit_perf < min_limit_perf)
550 max_limit_perf = min_limit_perf;
551
552 WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
553 WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
554 WRITE_ONCE(cpudata->max_limit_freq, policy->max);
555 WRITE_ONCE(cpudata->min_limit_freq, policy->min);
556
557 return 0;
558}
559
560static int amd_pstate_update_freq(struct cpufreq_policy *policy,
561 unsigned int target_freq, bool fast_switch)
562{
563 struct cpufreq_freqs freqs;
564 struct amd_cpudata *cpudata = policy->driver_data;
565 unsigned long max_perf, min_perf, des_perf, cap_perf;
566
567 if (!cpudata->max_freq)
568 return -ENODEV;
569
570 if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
571 amd_pstate_update_min_max_limit(policy);
572
ec437d71 573 cap_perf = READ_ONCE(cpudata->highest_perf);
b185c505 574 min_perf = READ_ONCE(cpudata->lowest_perf);
575 max_perf = cap_perf;
576
577 freqs.old = policy->cur;
578 freqs.new = target_freq;
579
580 des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
581 cpudata->max_freq);
582
583 WARN_ON(fast_switch && !policy->fast_switch_enabled);
584 /*
585 * If fast_switch is desired, then there aren't any registered
586 * transition notifiers. See comment for
587 * cpufreq_enable_fast_switch().
588 */
589 if (!fast_switch)
590 cpufreq_freq_transition_begin(policy, &freqs);
591
ec437d71 592 amd_pstate_update(cpudata, min_perf, des_perf,
593 max_perf, fast_switch, policy->governor->flags);
594
595 if (!fast_switch)
596 cpufreq_freq_transition_end(policy, &freqs, false);
597
598 return 0;
599}
600
601static int amd_pstate_target(struct cpufreq_policy *policy,
602 unsigned int target_freq,
603 unsigned int relation)
604{
605 return amd_pstate_update_freq(policy, target_freq, false);
606}
607
608static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
609 unsigned int target_freq)
610{
611 if (!amd_pstate_update_freq(policy, target_freq, true))
612 return target_freq;
613 return policy->cur;
614}
615
616static void amd_pstate_adjust_perf(unsigned int cpu,
617 unsigned long _min_perf,
618 unsigned long target_perf,
619 unsigned long capacity)
620{
621 unsigned long max_perf, min_perf, des_perf,
3bf8c630 622 cap_perf, lowest_nonlinear_perf, max_freq;
623 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
624 struct amd_cpudata *cpudata = policy->driver_data;
3bf8c630 625 unsigned int target_freq;
1d215f03 626
627 if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
628 amd_pstate_update_min_max_limit(policy);
629
630
631 cap_perf = READ_ONCE(cpudata->highest_perf);
632 lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
3bf8c630 633 max_freq = READ_ONCE(cpudata->max_freq);
634
635 des_perf = cap_perf;
636 if (target_perf < capacity)
637 des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
638
b26ffbf8 639 min_perf = READ_ONCE(cpudata->lowest_perf);
640 if (_min_perf < capacity)
641 min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
642
643 if (min_perf < lowest_nonlinear_perf)
644 min_perf = lowest_nonlinear_perf;
645
646 max_perf = cap_perf;
647 if (max_perf < min_perf)
648 max_perf = min_perf;
649
650 des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
651 target_freq = div_u64(des_perf * max_freq, max_perf);
652 policy->cur = target_freq;
653
654 amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
655 policy->governor->flags);
4f3085f8 656 cpufreq_cpu_put(policy);
657}
658
659static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
660{
661 struct amd_cpudata *cpudata = policy->driver_data;
662 int ret;
663
664 if (!cpudata->boost_supported) {
665 pr_err("Boost mode is not supported by this processor or SBIOS\n");
666 return -EINVAL;
667 }
668
669 if (state)
670 policy->cpuinfo.max_freq = cpudata->max_freq;
671 else
672 policy->cpuinfo.max_freq = cpudata->nominal_freq;
673
674 policy->max = policy->cpuinfo.max_freq;
675
676 ret = freq_qos_update_request(&cpudata->req[1],
677 policy->cpuinfo.max_freq);
678 if (ret < 0)
679 return ret;
680
681 return 0;
682}
683
684static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
685{
686 u32 highest_perf, nominal_perf;
687
688 highest_perf = READ_ONCE(cpudata->highest_perf);
689 nominal_perf = READ_ONCE(cpudata->nominal_perf);
690
691 if (highest_perf <= nominal_perf)
692 return;
693
694 cpudata->boost_supported = true;
ffa5096a 695 current_pstate_driver->boost_enabled = true;
696}
697
698static void amd_perf_ctl_reset(unsigned int cpu)
699{
700 wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
701}
702
/*
 * Enabling amd-pstate preferred core support can't be done directly from
 * cpufreq callbacks due to locking, so queue the work for later.
 */
707static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
708{
709 sched_set_itmt_support();
710}
711static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn);
712
713/*
714 * Get the highest performance register value.
715 * @cpu: CPU from which to get highest performance.
716 * @highest_perf: Return address.
717 *
718 * Return: 0 for success, -EIO otherwise.
719 */
720static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
721{
722 int ret;
723
724 if (boot_cpu_has(X86_FEATURE_CPPC)) {
725 u64 cap1;
726
727 ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
728 if (ret)
729 return ret;
730 WRITE_ONCE(*highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
731 } else {
732 u64 cppc_highest_perf;
733
734 ret = cppc_get_highest_perf(cpu, &cppc_highest_perf);
735 if (ret)
736 return ret;
737 WRITE_ONCE(*highest_perf, cppc_highest_perf);
738 }
739
740 return (ret);
741}
742
743#define CPPC_MAX_PERF U8_MAX
744
745static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
746{
747 int ret, prio;
748 u32 highest_perf;
749
750 ret = amd_pstate_get_highest_perf(cpudata->cpu, &highest_perf);
751 if (ret)
752 return;
753
754 cpudata->hw_prefcore = true;
	/* check if the CPPC preferred core feature is enabled */
756 if (highest_perf < CPPC_MAX_PERF)
757 prio = (int)highest_perf;
758 else {
759 pr_debug("AMD CPPC preferred core is unsupported!\n");
760 cpudata->hw_prefcore = false;
761 return;
762 }
763
764 if (!amd_pstate_prefcore)
765 return;
766
767 /*
768 * The priorities can be set regardless of whether or not
769 * sched_set_itmt_support(true) has been called and it is valid to
770 * update them at any time after it has been called.
771 */
772 sched_set_itmt_core_prio(prio, cpudata->cpu);
773
774 schedule_work(&sched_prefcore_work);
775}
776
777static void amd_pstate_update_limits(unsigned int cpu)
778{
779 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
780 struct amd_cpudata *cpudata = policy->driver_data;
781 u32 prev_high = 0, cur_high = 0;
782 int ret;
783 bool highest_perf_changed = false;
784
785 mutex_lock(&amd_pstate_driver_lock);
786 if ((!amd_pstate_prefcore) || (!cpudata->hw_prefcore))
787 goto free_cpufreq_put;
788
789 ret = amd_pstate_get_highest_perf(cpu, &cur_high);
790 if (ret)
791 goto free_cpufreq_put;
792
793 prev_high = READ_ONCE(cpudata->prefcore_ranking);
794 if (prev_high != cur_high) {
795 highest_perf_changed = true;
796 WRITE_ONCE(cpudata->prefcore_ranking, cur_high);
797
798 if (cur_high < CPPC_MAX_PERF)
799 sched_set_itmt_core_prio((int)cur_high, cpu);
800 }
801
802free_cpufreq_put:
803 cpufreq_cpu_put(policy);
804
805 if (!highest_perf_changed)
806 cpufreq_update_policy(cpu);
807
808 mutex_unlock(&amd_pstate_driver_lock);
809}
810
/*
 * Get the pstate transition delay time from the ACPI tables that the
 * firmware set, instead of using a hardcoded value.
 */
815static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
816{
817 u32 transition_delay_ns;
818
819 transition_delay_ns = cppc_get_transition_latency(cpu);
820 if (transition_delay_ns == CPUFREQ_ETERNAL)
821 return AMD_PSTATE_TRANSITION_DELAY;
822
823 return transition_delay_ns / NSEC_PER_USEC;
824}
825
/*
 * Get the pstate transition latency value from the ACPI tables that the
 * firmware set, instead of using a hardcoded value.
 */
830static u32 amd_pstate_get_transition_latency(unsigned int cpu)
831{
832 u32 transition_latency;
833
834 transition_latency = cppc_get_transition_latency(cpu);
835 if (transition_latency == CPUFREQ_ETERNAL)
836 return AMD_PSTATE_TRANSITION_LATENCY;
837
838 return transition_latency;
839}
840
/*
 * amd_pstate_init_freq: Initialize the max_freq, min_freq,
 *                       nominal_freq and lowest_nonlinear_freq
 *                       for the @cpudata object.
 *
 * Requires: highest_perf, lowest_perf, nominal_perf and
 *           lowest_nonlinear_perf members of @cpudata to be
 *           initialized.
 *
 * Returns 0 on success, non-zero value on failure.
 */
852static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
853{
854 int ret;
855 u32 min_freq;
856 u32 highest_perf, max_freq;
857 u32 nominal_perf, nominal_freq;
858 u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
859 u32 boost_ratio, lowest_nonlinear_ratio;
860 struct cppc_perf_caps cppc_perf;
861
862 ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
863 if (ret)
864 return ret;
865
866 if (quirks && quirks->lowest_freq)
867 min_freq = quirks->lowest_freq * 1000;
868 else
869 min_freq = cppc_perf.lowest_freq * 1000;
870
871 if (quirks && quirks->nominal_freq)
		nominal_freq = quirks->nominal_freq;
873 else
874 nominal_freq = cppc_perf.nominal_freq;
875
876 nominal_perf = READ_ONCE(cpudata->nominal_perf);
877
878 highest_perf = READ_ONCE(cpudata->highest_perf);
879 boost_ratio = div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
880 max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
881
882 lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
883 lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
884 nominal_perf);
885 lowest_nonlinear_freq = (nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
886
887 WRITE_ONCE(cpudata->min_freq, min_freq);
888 WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);
889 WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
890 WRITE_ONCE(cpudata->max_freq, max_freq);
891
892 return 0;
893}
894
895static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
896{
5c3fd1ed 897 int min_freq, max_freq, nominal_freq, ret;
898 struct device *dev;
899 struct amd_cpudata *cpudata;
900
	/*
	 * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
	 * which is ideal for the initialization process.
	 */
905 amd_perf_ctl_reset(policy->cpu);
906 dev = get_cpu_device(policy->cpu);
907 if (!dev)
908 return -ENODEV;
909
910 cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
911 if (!cpudata)
912 return -ENOMEM;
913
914 cpudata->cpu = policy->cpu;
915
916 amd_pstate_init_prefcore(cpudata);
917
918 ret = amd_pstate_init_perf(cpudata);
919 if (ret)
41271016 920 goto free_cpudata1;
ec437d71 921
922 ret = amd_pstate_init_freq(cpudata);
923 if (ret)
924 goto free_cpudata1;
925
926 min_freq = READ_ONCE(cpudata->min_freq);
927 max_freq = READ_ONCE(cpudata->max_freq);
928 nominal_freq = READ_ONCE(cpudata->nominal_freq);
ec437d71 929
930 if (min_freq <= 0 || max_freq <= 0 ||
931 nominal_freq <= 0 || min_freq > max_freq) {
932 dev_err(dev,
		"min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect, check _CPC in ACPI tables\n",
934 min_freq, max_freq, nominal_freq);
ec437d71 935 ret = -EINVAL;
41271016 936 goto free_cpudata1;
937 }
938
939 policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu);
940 policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu);
941
942 policy->min = min_freq;
943 policy->max = max_freq;
944
945 policy->cpuinfo.min_freq = min_freq;
946 policy->cpuinfo.max_freq = max_freq;
947
948 /* It will be updated by governor */
949 policy->cur = policy->cpuinfo.min_freq;
950
951 if (boot_cpu_has(X86_FEATURE_CPPC))
952 policy->fast_switch_possible = true;
1d215f03 953
954 ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
955 FREQ_QOS_MIN, policy->cpuinfo.min_freq);
956 if (ret < 0) {
957 dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
958 goto free_cpudata1;
959 }
960
961 ret = freq_qos_add_request(&policy->constraints, &cpudata->req[1],
962 FREQ_QOS_MAX, policy->cpuinfo.max_freq);
963 if (ret < 0) {
964 dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
965 goto free_cpudata2;
966 }
967
968 cpudata->max_limit_freq = max_freq;
969 cpudata->min_limit_freq = min_freq;
970
971 policy->driver_data = cpudata;
972
41271016 973 amd_pstate_boost_init(cpudata);
974 if (!current_pstate_driver->adjust_perf)
975 current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
41271016 976
977 return 0;
978
979free_cpudata2:
980 freq_qos_remove_request(&cpudata->req[0]);
981free_cpudata1:
982 kfree(cpudata);
983 return ret;
984}
985
986static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
987{
4f59540c 988 struct amd_cpudata *cpudata = policy->driver_data;
ec437d71 989
990 freq_qos_remove_request(&cpudata->req[1]);
991 freq_qos_remove_request(&cpudata->req[0]);
4badf2eb 992 policy->fast_switch_possible = false;
993 kfree(cpudata);
994
995 return 0;
996}
997
998static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
999{
1000 int ret;
1001
1002 ret = amd_pstate_enable(true);
1003 if (ret)
1004 pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
1005
1006 return ret;
1007}
1008
1009static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
1010{
1011 int ret;
1012
1013 ret = amd_pstate_enable(false);
1014 if (ret)
1015 pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
1016
1017 return ret;
1018}
1019
1020/* Sysfs attributes */
1021
/*
 * This frequency is used to indicate the maximum hardware frequency.
 * If boost is not active but supported, the frequency will be larger than
 * the one in cpuinfo.
 */
1027static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
1028 char *buf)
1029{
1030 int max_freq;
4f59540c 1031 struct amd_cpudata *cpudata = policy->driver_data;
ec4e3326 1032
3cbbe887 1033 max_freq = READ_ONCE(cpudata->max_freq);
1034 if (max_freq < 0)
1035 return max_freq;
1036
3ec32b6d 1037 return sysfs_emit(buf, "%u\n", max_freq);
1038}
1039
1040static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
1041 char *buf)
1042{
1043 int freq;
4f59540c 1044 struct amd_cpudata *cpudata = policy->driver_data;
ec4e3326 1045
3cbbe887 1046 freq = READ_ONCE(cpudata->lowest_nonlinear_freq);
1047 if (freq < 0)
1048 return freq;
1049
3ec32b6d 1050 return sysfs_emit(buf, "%u\n", freq);
1051}
1052
/*
 * In some ASICs, the highest_perf is not the one in the _CPC table, so we
 * need to expose it to sysfs.
 */
1057static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
1058 char *buf)
1059{
1060 u32 perf;
1061 struct amd_cpudata *cpudata = policy->driver_data;
1062
1063 perf = READ_ONCE(cpudata->highest_perf);
1064
3ec32b6d 1065 return sysfs_emit(buf, "%u\n", perf);
1066}
1067
1068static ssize_t show_amd_pstate_prefcore_ranking(struct cpufreq_policy *policy,
1069 char *buf)
1070{
1071 u32 perf;
1072 struct amd_cpudata *cpudata = policy->driver_data;
1073
1074 perf = READ_ONCE(cpudata->prefcore_ranking);
1075
1076 return sysfs_emit(buf, "%u\n", perf);
1077}
1078
1079static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
1080 char *buf)
1081{
1082 bool hw_prefcore;
1083 struct amd_cpudata *cpudata = policy->driver_data;
1084
1085 hw_prefcore = READ_ONCE(cpudata->hw_prefcore);
1086
1087 return sysfs_emit(buf, "%s\n", str_enabled_disabled(hw_prefcore));
1088}
1089
1090static ssize_t show_energy_performance_available_preferences(
1091 struct cpufreq_policy *policy, char *buf)
1092{
1093 int i = 0;
1094 int offset = 0;
1095 struct amd_cpudata *cpudata = policy->driver_data;
1096
1097 if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
1098 return sysfs_emit_at(buf, offset, "%s\n",
1099 energy_perf_strings[EPP_INDEX_PERFORMANCE]);
1100
1101 while (energy_perf_strings[i] != NULL)
1102 offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
1103
142c169b 1104 offset += sysfs_emit_at(buf, offset, "\n");
1105
1106 return offset;
1107}
1108
1109static ssize_t store_energy_performance_preference(
1110 struct cpufreq_policy *policy, const char *buf, size_t count)
1111{
1112 struct amd_cpudata *cpudata = policy->driver_data;
1113 char str_preference[21];
1114 ssize_t ret;
1115
1116 ret = sscanf(buf, "%20s", str_preference);
1117 if (ret != 1)
1118 return -EINVAL;
1119
1120 ret = match_string(energy_perf_strings, -1, str_preference);
1121 if (ret < 0)
1122 return -EINVAL;
1123
1124 mutex_lock(&amd_pstate_limits_lock);
1125 ret = amd_pstate_set_energy_pref_index(cpudata, ret);
1126 mutex_unlock(&amd_pstate_limits_lock);
1127
1128 return ret ?: count;
1129}
1130
1131static ssize_t show_energy_performance_preference(
1132 struct cpufreq_policy *policy, char *buf)
1133{
1134 struct amd_cpudata *cpudata = policy->driver_data;
1135 int preference;
1136
1137 preference = amd_pstate_get_energy_pref_index(cpudata);
1138 if (preference < 0)
1139 return preference;
1140
1141 return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
1142}
1143
1144static void amd_pstate_driver_cleanup(void)
1145{
1146 amd_pstate_enable(false);
1147 cppc_state = AMD_PSTATE_DISABLE;
1148 current_pstate_driver = NULL;
1149}
1150
1151static int amd_pstate_register_driver(int mode)
1152{
1153 int ret;
1154
1155 if (mode == AMD_PSTATE_PASSIVE || mode == AMD_PSTATE_GUIDED)
1156 current_pstate_driver = &amd_pstate_driver;
1157 else if (mode == AMD_PSTATE_ACTIVE)
1158 current_pstate_driver = &amd_pstate_epp_driver;
1159 else
1160 return -EINVAL;
1161
1162 cppc_state = mode;
1163 ret = cpufreq_register_driver(current_pstate_driver);
1164 if (ret) {
1165 amd_pstate_driver_cleanup();
1166 return ret;
1167 }
1168 return 0;
1169}
1170
1171static int amd_pstate_unregister_driver(int dummy)
1172{
1173 cpufreq_unregister_driver(current_pstate_driver);
1174 amd_pstate_driver_cleanup();
1175 return 0;
1176}
1177
1178static int amd_pstate_change_mode_without_dvr_change(int mode)
1179{
1180 int cpu = 0;
1181
1182 cppc_state = mode;
1183
1184 if (boot_cpu_has(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
1185 return 0;
1186
1187 for_each_present_cpu(cpu) {
1188 cppc_set_auto_sel(cpu, (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
1189 }
1190
1191 return 0;
1192}
1193
1194static int amd_pstate_change_driver_mode(int mode)
1195{
1196 int ret;
1197
1198 ret = amd_pstate_unregister_driver(0);
1199 if (ret)
1200 return ret;
1201
1202 ret = amd_pstate_register_driver(mode);
1203 if (ret)
1204 return ret;
1205
1206 return 0;
1207}
1208
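/*
 * Mode transition table: mode_state_machine[current mode][requested mode]
 * points to the handler that performs the switch; NULL entries mean no
 * transition is required.
 */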
11fa52fe 1209static cppc_mode_transition_fn mode_state_machine[AMD_PSTATE_MAX][AMD_PSTATE_MAX] = {
1210 [AMD_PSTATE_DISABLE] = {
1211 [AMD_PSTATE_DISABLE] = NULL,
1212 [AMD_PSTATE_PASSIVE] = amd_pstate_register_driver,
1213 [AMD_PSTATE_ACTIVE] = amd_pstate_register_driver,
1214 [AMD_PSTATE_GUIDED] = amd_pstate_register_driver,
1215 },
1216 [AMD_PSTATE_PASSIVE] = {
1217 [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver,
1218 [AMD_PSTATE_PASSIVE] = NULL,
1219 [AMD_PSTATE_ACTIVE] = amd_pstate_change_driver_mode,
1220 [AMD_PSTATE_GUIDED] = amd_pstate_change_mode_without_dvr_change,
1221 },
1222 [AMD_PSTATE_ACTIVE] = {
1223 [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver,
1224 [AMD_PSTATE_PASSIVE] = amd_pstate_change_driver_mode,
1225 [AMD_PSTATE_ACTIVE] = NULL,
1226 [AMD_PSTATE_GUIDED] = amd_pstate_change_driver_mode,
1227 },
1228 [AMD_PSTATE_GUIDED] = {
1229 [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver,
1230 [AMD_PSTATE_PASSIVE] = amd_pstate_change_mode_without_dvr_change,
1231 [AMD_PSTATE_ACTIVE] = amd_pstate_change_driver_mode,
1232 [AMD_PSTATE_GUIDED] = NULL,
1233 },
1234};
1235
1236static ssize_t amd_pstate_show_status(char *buf)
1237{
1238 if (!current_pstate_driver)
1239 return sysfs_emit(buf, "disable\n");
1240
1241 return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
1242}
1243
1244static int amd_pstate_update_status(const char *buf, size_t size)
1245{
1246 int mode_idx;
1247
3ca7bc81 1248 if (size > strlen("passive") || size < strlen("active"))
abd61c08 1249 return -EINVAL;
abd61c08 1250
3ca7bc81 1251 mode_idx = get_mode_idx_from_str(buf, size);
abd61c08 1252
1253 if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
1254 return -EINVAL;
abd61c08 1255
1256 if (mode_state_machine[cppc_state][mode_idx])
1257 return mode_state_machine[cppc_state][mode_idx](mode_idx);
abd61c08 1258
3ca7bc81 1259 return 0;
1260}
1261
1262static ssize_t status_show(struct device *dev,
1263 struct device_attribute *attr, char *buf)
1264{
1265 ssize_t ret;
1266
1267 mutex_lock(&amd_pstate_driver_lock);
1268 ret = amd_pstate_show_status(buf);
1269 mutex_unlock(&amd_pstate_driver_lock);
1270
1271 return ret;
1272}
1273
5e720f8c 1274static ssize_t status_store(struct device *a, struct device_attribute *b,
1275 const char *buf, size_t count)
1276{
1277 char *p = memchr(buf, '\n', count);
1278 int ret;
1279
1280 mutex_lock(&amd_pstate_driver_lock);
1281 ret = amd_pstate_update_status(buf, p ? p - buf : count);
1282 mutex_unlock(&amd_pstate_driver_lock);
1283
1284 return ret < 0 ? ret : count;
1285}
1286
1287static ssize_t prefcore_show(struct device *dev,
1288 struct device_attribute *attr, char *buf)
1289{
1290 return sysfs_emit(buf, "%s\n", str_enabled_disabled(amd_pstate_prefcore));
1291}
1292
1293cpufreq_freq_attr_ro(amd_pstate_max_freq);
1294cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
1295
3ad7fde1 1296cpufreq_freq_attr_ro(amd_pstate_highest_perf);
e571a5e2 1297cpufreq_freq_attr_ro(amd_pstate_prefcore_ranking);
f3a05239 1298cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
1299cpufreq_freq_attr_rw(energy_performance_preference);
1300cpufreq_freq_attr_ro(energy_performance_available_preferences);
5e720f8c 1301static DEVICE_ATTR_RW(status);
f3a05239 1302static DEVICE_ATTR_RO(prefcore);
3ad7fde1 1303
1304static struct freq_attr *amd_pstate_attr[] = {
1305 &amd_pstate_max_freq,
1306 &amd_pstate_lowest_nonlinear_freq,
3ad7fde1 1307 &amd_pstate_highest_perf,
e571a5e2 1308 &amd_pstate_prefcore_ranking,
f3a05239 1309 &amd_pstate_hw_prefcore,
1310 NULL,
1311};
1312
1313static struct freq_attr *amd_pstate_epp_attr[] = {
1314 &amd_pstate_max_freq,
1315 &amd_pstate_lowest_nonlinear_freq,
1316 &amd_pstate_highest_perf,
e571a5e2 1317 &amd_pstate_prefcore_ranking,
f3a05239 1318 &amd_pstate_hw_prefcore,
1319 &energy_performance_preference,
1320 &energy_performance_available_preferences,
1321 NULL,
1322};
1323
abd61c08 1324static struct attribute *pstate_global_attributes[] = {
5e720f8c 1325 &dev_attr_status.attr,
f3a05239 1326 &dev_attr_prefcore.attr,
1327 NULL
1328};
1329
1330static const struct attribute_group amd_pstate_global_attr_group = {
3666062b 1331 .name = "amd_pstate",
1332 .attrs = pstate_global_attributes,
1333};
1334
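/*
 * Classify the platform from the FADT preferred PM profile: server and
 * undefined profiles keep the driver disabled by default and select the
 * performance policy as the EPP fallback.
 */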
1335static bool amd_pstate_acpi_pm_profile_server(void)
1336{
1337 switch (acpi_gbl_FADT.preferred_profile) {
1338 case PM_ENTERPRISE_SERVER:
1339 case PM_SOHO_SERVER:
1340 case PM_PERFORMANCE_SERVER:
1341 return true;
1342 }
1343 return false;
1344}
1345
1346static bool amd_pstate_acpi_pm_profile_undefined(void)
1347{
1348 if (acpi_gbl_FADT.preferred_profile == PM_UNSPECIFIED)
1349 return true;
1350 if (acpi_gbl_FADT.preferred_profile >= NR_PM_PROFILES)
1351 return true;
1352 return false;
1353}
1354
1355static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
1356{
5c3fd1ed 1357 int min_freq, max_freq, nominal_freq, ret;
1358 struct amd_cpudata *cpudata;
1359 struct device *dev;
1360 u64 value;
1361
1362 /*
1363 * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
1364 * which is ideal for initialization process.
1365 */
1366 amd_perf_ctl_reset(policy->cpu);
1367 dev = get_cpu_device(policy->cpu);
1368 if (!dev)
7cca9a98 1369 return -ENODEV;
1370
1371 cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
1372 if (!cpudata)
1373 return -ENOMEM;
1374
1375 cpudata->cpu = policy->cpu;
1376 cpudata->epp_policy = 0;
1377
1378 amd_pstate_init_prefcore(cpudata);
1379
1380 ret = amd_pstate_init_perf(cpudata);
1381 if (ret)
1382 goto free_cpudata1;
1383
1384 ret = amd_pstate_init_freq(cpudata);
1385 if (ret)
1386 goto free_cpudata1;
1387
1388 min_freq = READ_ONCE(cpudata->min_freq);
1389 max_freq = READ_ONCE(cpudata->max_freq);
1390 nominal_freq = READ_ONCE(cpudata->nominal_freq);
1391 if (min_freq <= 0 || max_freq <= 0 ||
1392 nominal_freq <= 0 || min_freq > max_freq) {
1393 dev_err(dev,
1394 "min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect, check _CPC in ACPI tables\n",
1395 min_freq, max_freq, nominal_freq);
1396 ret = -EINVAL;
1397 goto free_cpudata1;
1398 }
1399
1400 policy->cpuinfo.min_freq = min_freq;
1401 policy->cpuinfo.max_freq = max_freq;
1402 /* It will be updated by governor */
1403 policy->cur = policy->cpuinfo.min_freq;
1404
1405 policy->driver_data = cpudata;
1406
1407 cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0);
1408
1409 policy->min = policy->cpuinfo.min_freq;
1410 policy->max = policy->cpuinfo.max_freq;
1411
1412 /*
32f80b9a 1413 * Set the policy to provide a valid fallback value in case
1414 * the default cpufreq governor is neither powersave nor performance.
1415 */
1416 if (amd_pstate_acpi_pm_profile_server() ||
1417 amd_pstate_acpi_pm_profile_undefined())
1418 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
1419 else
1420 policy->policy = CPUFREQ_POLICY_POWERSAVE;
1421
1422 if (boot_cpu_has(X86_FEATURE_CPPC)) {
1423 ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
1424 if (ret)
1425 return ret;
1426 WRITE_ONCE(cpudata->cppc_req_cached, value);
1427
1428 ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &value);
1429 if (ret)
1430 return ret;
1431 WRITE_ONCE(cpudata->cppc_cap1_cached, value);
1432 }
1433 amd_pstate_boost_init(cpudata);
1434
1435 return 0;
1436
1437free_cpudata1:
1438 kfree(cpudata);
1439 return ret;
1440}
1441
1442static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
1443{
1444 struct amd_cpudata *cpudata = policy->driver_data;
1445
1446 if (cpudata) {
1447 kfree(cpudata);
1448 policy->driver_data = NULL;
1449 }
1450
ffa5096a 1451 pr_debug("CPU %d exiting\n", policy->cpu);
1452 return 0;
1453}
1454
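/*
 * Recompute the min/max perf limits from the policy, program them into the
 * cached CPPC request with desired perf set to zero (autonomous mode), and
 * re-apply the EPP value for the current policy.
 */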
febab20c 1455static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
ffa5096a 1456{
ffa5096a 1457 struct amd_cpudata *cpudata = policy->driver_data;
febab20c 1458 u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
1459 u64 value;
1460 s16 epp;
1461
1462 max_perf = READ_ONCE(cpudata->highest_perf);
1463 min_perf = READ_ONCE(cpudata->lowest_perf);
1464 max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
1465 min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
1466
1467 if (min_limit_perf < min_perf)
1468 min_limit_perf = min_perf;
1469
1470 if (max_limit_perf < min_limit_perf)
1471 max_limit_perf = min_limit_perf;
1472
1473 WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
1474 WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
1475
1476 max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
1477 cpudata->max_limit_perf);
1478 min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
1479 cpudata->max_limit_perf);
1480 value = READ_ONCE(cpudata->cppc_req_cached);
1481
1482 if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
1483 min_perf = max_perf;
1484
1485 /* Initial min/max values for CPPC Performance Controls Register */
1486 value &= ~AMD_CPPC_MIN_PERF(~0L);
1487 value |= AMD_CPPC_MIN_PERF(min_perf);
1488
1489 value &= ~AMD_CPPC_MAX_PERF(~0L);
1490 value |= AMD_CPPC_MAX_PERF(max_perf);
1491
	/* The CPPC EPP feature requires the desired perf field to be set to zero */
1493 value &= ~AMD_CPPC_DES_PERF(~0L);
1494 value |= AMD_CPPC_DES_PERF(0);
1495
1496 cpudata->epp_policy = cpudata->policy;
1497
1498 /* Get BIOS pre-defined epp value */
1499 epp = amd_pstate_get_epp(cpudata, value);
1500 if (epp < 0) {
		/*
		 * This return value can only be negative for shared memory
		 * systems where EPP register read/write is not supported.
		 */
febab20c 1505 return;
ffa5096a 1506 }
1507
1508 if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
1509 epp = 0;
1510
1511 /* Set initial EPP value */
1512 if (boot_cpu_has(X86_FEATURE_CPPC)) {
1513 value &= ~GENMASK_ULL(31, 24);
1514 value |= (u64)epp << 24;
1515 }
1516
6e9d1212 1517 WRITE_ONCE(cpudata->cppc_req_cached, value);
7cca9a98 1518 amd_pstate_set_epp(cpudata, epp);
1519}
1520
1521static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
1522{
1523 struct amd_cpudata *cpudata = policy->driver_data;
1524
1525 if (!policy->cpuinfo.max_freq)
1526 return -ENODEV;
1527
1528 pr_debug("set_policy: cpuinfo.max %u policy->max %u\n",
1529 policy->cpuinfo.max_freq, policy->max);
1530
1531 cpudata->policy = policy->policy;
1532
febab20c 1533 amd_pstate_epp_update_limit(policy);
ffa5096a
PY
1534
1535 return 0;
1536}
1537
1538static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
1539{
1540 struct cppc_perf_ctrls perf_ctrls;
1541 u64 value, max_perf;
1542 int ret;
1543
1544 ret = amd_pstate_enable(true);
1545 if (ret)
1546 pr_err("failed to enable amd pstate during resume, return %d\n", ret);
1547
1548 value = READ_ONCE(cpudata->cppc_req_cached);
1549 max_perf = READ_ONCE(cpudata->highest_perf);
1550
1551 if (boot_cpu_has(X86_FEATURE_CPPC)) {
1552 wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
1553 } else {
1554 perf_ctrls.max_perf = max_perf;
1555 perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
1556 cppc_set_perf(cpudata->cpu, &perf_ctrls);
1557 }
1558}
1559
1560static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
1561{
1562 struct amd_cpudata *cpudata = policy->driver_data;
1563
1564 pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
1565
1566 if (cppc_state == AMD_PSTATE_ACTIVE) {
1567 amd_pstate_epp_reenable(cpudata);
1568 cpudata->suspended = false;
1569 }
1570
1571 return 0;
1572}
1573
1574static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
1575{
1576 struct amd_cpudata *cpudata = policy->driver_data;
1577 struct cppc_perf_ctrls perf_ctrls;
1578 int min_perf;
1579 u64 value;
1580
1581 min_perf = READ_ONCE(cpudata->lowest_perf);
1582 value = READ_ONCE(cpudata->cppc_req_cached);
1583
1584 mutex_lock(&amd_pstate_limits_lock);
1585 if (boot_cpu_has(X86_FEATURE_CPPC)) {
1586 cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
1587
1588 /* Set max perf same as min perf */
1589 value &= ~AMD_CPPC_MAX_PERF(~0L);
1590 value |= AMD_CPPC_MAX_PERF(min_perf);
1591 value &= ~AMD_CPPC_MIN_PERF(~0L);
1592 value |= AMD_CPPC_MIN_PERF(min_perf);
1593 wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
1594 } else {
1595 perf_ctrls.desired_perf = 0;
1596 perf_ctrls.max_perf = min_perf;
1597 perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
1598 cppc_set_perf(cpudata->cpu, &perf_ctrls);
1599 }
1600 mutex_unlock(&amd_pstate_limits_lock);
1601}
1602
1603static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
1604{
1605 struct amd_cpudata *cpudata = policy->driver_data;
1606
1607 pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
1608
1609 if (cpudata->suspended)
1610 return 0;
1611
1612 if (cppc_state == AMD_PSTATE_ACTIVE)
1613 amd_pstate_epp_offline(policy);
1614
1615 return 0;
1616}
1617
1618static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy)
1619{
1620 cpufreq_verify_within_cpu_limits(policy);
1621 pr_debug("policy_max =%d, policy_min=%d\n", policy->max, policy->min);
1622 return 0;
1623}
1624
1625static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
1626{
1627 struct amd_cpudata *cpudata = policy->driver_data;
1628 int ret;
1629
1630 /* avoid suspending when EPP is not enabled */
1631 if (cppc_state != AMD_PSTATE_ACTIVE)
1632 return 0;
1633
	/* set this flag to avoid setting the core offline */
1635 cpudata->suspended = true;
1636
	/* disable CPPC in low-level firmware */
1638 ret = amd_pstate_enable(false);
1639 if (ret)
1640 pr_err("failed to suspend, return %d\n", ret);
1641
1642 return 0;
1643}
1644
1645static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
1646{
1647 struct amd_cpudata *cpudata = policy->driver_data;
1648
1649 if (cpudata->suspended) {
1650 mutex_lock(&amd_pstate_limits_lock);
1651
		/* enable amd-pstate from the suspend state */
1653 amd_pstate_epp_reenable(cpudata);
1654
1655 mutex_unlock(&amd_pstate_limits_lock);
1656
1657 cpudata->suspended = false;
1658 }
1659
1660 return 0;
1661}
1662
1663static struct cpufreq_driver amd_pstate_driver = {
1664 .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
1665 .verify = amd_pstate_verify,
1666 .target = amd_pstate_target,
4badf2eb 1667 .fast_switch = amd_pstate_fast_switch,
1668 .init = amd_pstate_cpu_init,
1669 .exit = amd_pstate_cpu_exit,
1670 .suspend = amd_pstate_cpu_suspend,
1671 .resume = amd_pstate_cpu_resume,
41271016 1672 .set_boost = amd_pstate_set_boost,
e571a5e2 1673 .update_limits = amd_pstate_update_limits,
ec437d71 1674 .name = "amd-pstate",
d8bee41d 1675 .attr = amd_pstate_attr,
1676};
1677
1678static struct cpufreq_driver amd_pstate_epp_driver = {
1679 .flags = CPUFREQ_CONST_LOOPS,
1680 .verify = amd_pstate_epp_verify_policy,
1681 .setpolicy = amd_pstate_epp_set_policy,
1682 .init = amd_pstate_epp_cpu_init,
1683 .exit = amd_pstate_epp_cpu_exit,
1684 .offline = amd_pstate_epp_cpu_offline,
1685 .online = amd_pstate_epp_cpu_online,
1686 .suspend = amd_pstate_epp_suspend,
1687 .resume = amd_pstate_epp_resume,
e571a5e2 1688 .update_limits = amd_pstate_update_limits,
f4aad639 1689 .name = "amd-pstate-epp",
1690 .attr = amd_pstate_epp_attr,
1691};
1692
1693static int __init amd_pstate_set_driver(int mode_idx)
1694{
1695 if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
1696 cppc_state = mode_idx;
1697 if (cppc_state == AMD_PSTATE_DISABLE)
1698 pr_info("driver is explicitly disabled\n");
1699
1700 if (cppc_state == AMD_PSTATE_ACTIVE)
1701 current_pstate_driver = &amd_pstate_epp_driver;
1702
1703 if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
1704 current_pstate_driver = &amd_pstate_driver;
1705
1706 return 0;
1707 }
1708
1709 return -EINVAL;
1710}
1711
1712static int __init amd_pstate_init(void)
1713{
3666062b 1714 struct device *dev_root;
ec437d71
HR
1715 int ret;
1716
1717 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
1718 return -ENODEV;
1719
1720 if (!acpi_cpc_valid()) {
a2a9d185 1721 pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
1722 return -ENODEV;
1723 }
1724
1725 /* don't keep reloading if cpufreq_driver exists */
1726 if (cpufreq_get_current_driver())
1727 return -EEXIST;
1728
1729 quirks = NULL;
1730
	/* check if this machine needs CPPC quirks */
1732 dmi_check_system(amd_pstate_quirks_table);
1733
1734 switch (cppc_state) {
1735 case AMD_PSTATE_UNDEFINED:
1736 /* Disable on the following configs by default:
1737 * 1. Undefined platforms
1738 * 2. Server platforms
1739 * 3. Shared memory designs
1740 */
1741 if (amd_pstate_acpi_pm_profile_undefined() ||
1742 amd_pstate_acpi_pm_profile_server() ||
1743 !boot_cpu_has(X86_FEATURE_CPPC)) {
1744 pr_info("driver load is disabled, boot with specific mode to enable this\n");
1745 return -ENODEV;
1746 }
1747 ret = amd_pstate_set_driver(CONFIG_X86_AMD_PSTATE_DEFAULT_MODE);
1748 if (ret)
1749 return ret;
1750 break;
1751 case AMD_PSTATE_DISABLE:
1752 return -ENODEV;
1753 case AMD_PSTATE_PASSIVE:
1754 case AMD_PSTATE_ACTIVE:
1755 case AMD_PSTATE_GUIDED:
1756 break;
1757 default:
1758 return -EINVAL;
1759 }
1760
ec437d71 1761 /* capability check */
1762 if (boot_cpu_has(X86_FEATURE_CPPC)) {
1763 pr_debug("AMD CPPC MSR based functionality is supported\n");
2dd6d0eb 1764 if (cppc_state != AMD_PSTATE_ACTIVE)
ffa5096a 1765 current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
1766 } else {
1767 pr_debug("AMD CPPC shared memory based functionality is supported\n");
1768 static_call_update(amd_pstate_enable, cppc_enable);
1769 static_call_update(amd_pstate_init_perf, cppc_init_perf);
1770 static_call_update(amd_pstate_update_perf, cppc_update_perf);
1771 }
1772
1773 /* enable amd pstate feature */
1774 ret = amd_pstate_enable(true);
1775 if (ret) {
ffa5096a 1776 pr_err("failed to enable with return %d\n", ret);
1777 return ret;
1778 }
1779
ffa5096a 1780 ret = cpufreq_register_driver(current_pstate_driver);
ec437d71 1781 if (ret)
ffa5096a 1782 pr_err("failed to register with return %d\n", ret);
ec437d71 1783
1784 dev_root = bus_get_dev_root(&cpu_subsys);
1785 if (dev_root) {
1786 ret = sysfs_create_group(&dev_root->kobj, &amd_pstate_global_attr_group);
1787 put_device(dev_root);
1788 if (ret) {
1789 pr_err("sysfs attribute export failed with error %d.\n", ret);
1790 goto global_attr_free;
1791 }
1792 }
1793
1794 return ret;
1795
1796global_attr_free:
abd61c08 1797 cpufreq_unregister_driver(current_pstate_driver);
1798 return ret;
1799}
456ca88d 1800device_initcall(amd_pstate_init);
ec437d71 1801
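/*
 * Early parameter handler for "amd_pstate=<mode>"; the accepted strings are
 * the entries of amd_pstate_mode_string[] (e.g. "disable", "passive",
 * "active" or "guided").
 */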
1802static int __init amd_pstate_param(char *str)
1803{
1804 size_t size;
1805 int mode_idx;
1806
1807 if (!str)
1808 return -EINVAL;
1809
1810 size = strlen(str);
1811 mode_idx = get_mode_idx_from_str(str, size);
202e683d 1812
c88ad30e 1813 return amd_pstate_set_driver(mode_idx);
202e683d 1814}
1815
1816static int __init amd_prefcore_param(char *str)
1817{
1818 if (!strcmp(str, "disable"))
1819 amd_pstate_prefcore = false;
1820
1821 return 0;
1822}
1823
202e683d 1824early_param("amd_pstate", amd_pstate_param);
f3a05239 1825early_param("amd_prefcore", amd_prefcore_param);
202e683d 1826
1827MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
1828MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");