/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>

#define BYT_RATIOS		0x66a
#define BYT_VIDS		0x66b
#define BYT_TURBO_RATIOS	0x66c
#define BYT_TURBO_VIDS		0x66d

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

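/*
 * All busyness and ratio arithmetic below is done in signed fixed
 * point with FRAC_BITS fractional bits.  int_tofp()/fp_toint()
 * convert to and from that representation; mul_fp() and div_fp()
 * widen to 64 bits internally so the fractional bits are preserved.
 */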
static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(int32_t x, int32_t y)
{
	return div_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

struct sample {
	int32_t core_pct_busy;
	u64 aperf;
	u64 mperf;
	int freq;
	ktime_t time;
};

struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	scaling;
	int	turbo_pstate;
};

struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	struct timer_list timer;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	ktime_t last_sample_time;
	u64	prev_aperf;
	u64	prev_mperf;
	struct sample sample;
};

static struct cpudata **all_cpu_data;

struct pstate_adjust_policy {
	int sample_rate_ms;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

struct pstate_funcs {
	int (*get_max)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	void (*set)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
};

struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;

struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
};

static struct perf_limits limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
};

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = setpoint;
	pid->deadband = deadband;
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}

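/*
 * Classic PID controller operating on the scaled busy value in fixed
 * point.  The error is setpoint - busy, the integral term is clamped,
 * and the result is rounded to the nearest integer before the
 * fractional bits are dropped.  A negative return value makes the
 * caller raise the P state, a positive one lowers it.
 */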
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = int_tofp(pid->setpoint) - busy;

	if (abs(fp_error) <= int_tofp(pid->deadband))
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/* limit the integral term */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}

static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits.turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

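/*
 * With HWP (Hardware P states) enabled the processor selects P states
 * autonomously; the driver only conveys the allowed min/max range via
 * MSR_HWP_REQUEST, mapping percent limits onto the MSR's 0-255
 * performance scale.  When turbo is disabled the ceiling is pulled
 * down to the guaranteed performance level instead.
 */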
#define PCT_TO_HWP(x) (x * 255 / 100)
static void intel_pstate_hwp_set(void)
{
	int min, max, cpu;
	u64 value, freq;

	get_online_cpus();

	for_each_online_cpu(cpu) {
		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		min = PCT_TO_HWP(limits.min_perf_pct);
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		max = PCT_TO_HWP(limits.max_perf_pct);
		if (limits.no_turbo) {
			rdmsrl(MSR_HWP_CAPABILITIES, freq);
			max = HWP_GUARANTEED_PERF(freq);
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}

	put_online_cpus();
}

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active)
		return;
	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits.object);		\
	}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits.turbo_disabled)
		ret = sprintf(buf, "%u\n", limits.turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits.no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_turbo_state();
	if (limits.turbo_disabled) {
		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

	limits.no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set();

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.min_perf_pct = clamp_t(int, input, 0, 100);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(void)
{
	hwp_active++;
	pr_info("intel_pstate HWP enabled\n");

	wrmsrl(MSR_PM_ENABLE, 0x1);
}

static int byt_get_min_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int byt_get_max_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int byt_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(BYT_TURBO_RATIOS, value);
	return value & 0x7F;
}

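/*
 * Baytrail P state control is split across two fields: the requested
 * ratio and a matching voltage (VID).  The VID is interpolated
 * linearly between the min and max operating points, rounded up, and
 * overridden with the dedicated turbo VID above the guaranteed
 * maximum ratio.
 */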
static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = pstate << 8;
	if (limits.no_turbo && !limits.turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	val |= vid;

	wrmsrl(MSR_IA32_PERF_CTL, val);
}

#define BYT_BCLK_FREQS 5
static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};

static int byt_get_scaling(void)
{
	u64 value;
	int i;

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x3;

	BUG_ON(i > BYT_BCLK_FREQS);

	return byt_freq_table[i] * 100;
}

static void byt_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(BYT_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
				cpudata->pstate.min_pstate));

	rdmsrl(BYT_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = value & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

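/*
 * Core processors run a 100 MHz bus clock, so a P state ratio is
 * converted to kHz with a fixed scaling factor of 100000.
 */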
static inline int core_get_scaling(void)
{
	return 100000;
}

static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = pstate << 8;
	if (limits.no_turbo && !limits.turbo_disabled)
		val |= (u64)1 << 32;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
	},
};

static struct cpu_defaults byt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = byt_get_max_pstate,
		.get_min = byt_get_min_pstate,
		.get_turbo = byt_get_turbo_pstate,
		.set = byt_set_pstate,
		.get_scaling = byt_get_scaling,
		.get_vid = byt_get_vid,
	},
};

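/*
 * Translate the turbo ceiling (or the guaranteed maximum when turbo
 * is off) through the user/policy percentage limits, clamped to the
 * hardware's min/turbo bounds.
 */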
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits.no_turbo || limits.turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	update_turbo_state();

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

	pstate = clamp_t(int, pstate, min_perf, max_perf);

	if (pstate == cpu->pstate.current_pstate)
		return;

	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

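/*
 * Derive C0 residency from the APERF/MPERF deltas collected by
 * intel_pstate_sample(): core_pct_busy is the percentage of the
 * interval the core spent unhalted, and freq estimates the average
 * effective frequency over that interval.
 */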
static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int64_t core_pct;

	core_pct = int_tofp(sample->aperf) * int_tofp(100);
	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));

	sample->freq = fp_toint(
		mul_fp(int_tofp(
			cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
			core_pct));

	sample->core_pct_busy = (int32_t)core_pct;
}

static inline void intel_pstate_sample(struct cpudata *cpu)
{
	u64 aperf, mperf;
	unsigned long flags;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = ktime_get();
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;

	intel_pstate_calc_busy(cpu);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
}

static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(50);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}

static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(pid_params.sample_rate_ms);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}

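/*
 * Normalize the sampled busyness to the guaranteed maximum ratio so
 * the PID setpoint is independent of the current P state.  If the
 * deferrable timer fired much later than requested (the CPU was
 * mostly idle), scale the result down to avoid overestimating load.
 */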
static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	u32 duration_us;
	u32 sample_time;

	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
	duration_us = (u32) ktime_us_delta(cpu->sample.time,
					   cpu->last_sample_time);
	if (duration_us > sample_time * 3) {
		sample_ratio = div_fp(int_tofp(sample_time),
				      int_tofp(duration_us));
		core_busy = mul_fp(core_busy, sample_ratio);
	}

	return core_busy;
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int32_t busy_scaled;
	struct _pid *pid;
	signed int ctl;

	pid = &cpu->pid;
	busy_scaled = intel_pstate_get_scaled_busy(cpu);

	ctl = pid_calc(pid, busy_scaled);

	/* Negative values of ctl increase the pstate and vice versa */
	intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
}

static void intel_hwp_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);
	intel_hwp_set_sample_time(cpu);
}

static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;
	struct sample *sample;

	intel_pstate_sample(cpu);

	sample = &cpu->sample;

	intel_pstate_adjust_busy_pstate(cpu);

	trace_pstate_sample(fp_toint(sample->core_pct_busy),
			fp_toint(intel_pstate_get_scaled_busy(cpu)),
			cpu->pstate.current_pstate,
			sample->mperf,
			sample->aperf,
			sample->freq);

	intel_pstate_set_sample_time(cpu);
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

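/*
 * CPUID model numbers (family 6) known to work with this driver.
 * 0x37 and 0x4c are the Atom cores (Baytrail and Cherry Trail/
 * Braswell), which take the VID-aware byt_params; the remaining
 * models use core_params.
 */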
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, byt_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x47, core_params),
	ICPU(0x4c, byt_params),
	ICPU(0x4f, core_params),
	ICPU(0x56, core_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
	ICPU(0x56, core_params),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	if (!all_cpu_data[cpunum])
		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
					       GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;
	intel_pstate_get_cpu_pstates(cpu);

	init_timer_deferrable(&cpu->timer);
	cpu->timer.data = (unsigned long)cpu;
	cpu->timer.expires = jiffies + HZ/100;

	if (!hwp_active)
		cpu->timer.function = intel_pstate_timer_func;
	else
		cpu->timer.function = intel_hwp_timer_func;

	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu);

	add_timer_on(&cpu->timer, cpunum);

	pr_debug("Intel pstate controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->sample;
	return sample->freq;
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		limits.min_perf_pct = 100;
		limits.min_perf = int_tofp(1);
		limits.max_policy_pct = 100;
		limits.max_perf_pct = 100;
		limits.max_perf = int_tofp(1);
		limits.no_turbo = 0;
		return 0;
	}

	limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0, 100);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_info("intel_pstate CPU %d exiting\n", cpu_num);

	del_timer_sync(&all_cpu_data[cpu_num]->timer);
	if (hwp_active)
		return;

	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->cpuinfo.max_freq =
		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

static int __initdata no_load;
static int __initdata no_hwp;

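/*
 * APERF/MPERF may not be implemented (e.g. under some hypervisors)
 * even when the CPU model matches, so read each counter twice and
 * require it to advance before trusting it for busy estimation.
 */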
static int intel_pstate_msrs_not_valid(void)
{
	/* Check that all the msr's we are using are valid. */
	u64 aperf, mperf, tmp;

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);

	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	rdmsrl(MSR_IA32_APERF, tmp);
	if (!(tmp - aperf))
		return -ENODEV;

	rdmsrl(MSR_IA32_MPERF, tmp);
	if (!(tmp - mperf))
		return -ENODEV;

	return 0;
}

static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.set = funcs->set;
	pstate_funcs.get_vid = funcs->get_vid;
}

#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	u8 oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{0, "", ""},
};

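/*
 * Decline to load when the platform already manages P states itself:
 * either an out-of-band controller advertised in MSR_MISC_PWR_MGMT,
 * or a known server platform whose ACPI tables (_PSS/_PPC) indicate
 * active firmware coordination.
 */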
static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
			     ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc();
			}
	}

	return false;
}

#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */

static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_info;
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (no_load)
		return -ENODEV;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	cpu_info = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_info->pid_policy);
	copy_cpu_funcs(&cpu_info->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (cpu_has(c, X86_FEATURE_HWP) && !no_hwp)
		intel_pstate_hwp_enable();

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}

device_initcall(intel_pstate_init);

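/*
 * Command line handling: "intel_pstate=disable" keeps the driver from
 * loading, and "intel_pstate=no_hwp" leaves hardware P states off on
 * HWP-capable parts.
 */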
static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp"))
		no_hwp = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");