/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

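/*
 * Worked example of the fixed-point helpers above (illustrative
 * numbers only): with FRAC_BITS = 8, int_tofp(1) == 256, so a ratio
 * of 75% is stored as div_fp(int_tofp(75), int_tofp(100)) == 192.
 * mul_fp(192, int_tofp(100)) == int_tofp(75), and fp_toint() of that
 * recovers 75. ceiling_fp() rounds any nonzero fraction up, e.g.
 * ceiling_fp(193) == 1.
 */
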
struct sample {
	int32_t core_pct_busy;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	u64 time;
};

struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	turbo_pstate;
	int	scaling;
};

struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	struct update_util_data update_util;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	u64 last_sample_time;
	u64 prev_aperf;
	u64 prev_mperf;
	u64 prev_tsc;
	u64 prev_cummulative_iowait;
	struct sample sample;
};

static struct cpudata **all_cpu_data;

struct pstate_adjust_policy {
	int sample_rate_ms;
	s64 sample_rate_ns;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	void (*set)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
	int32_t (*get_target_pstate)(struct cpudata *);
};

struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;

struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = int_tofp(setpoint);
	pid->deadband = int_tofp(deadband);
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = pid->setpoint - busy;

	if (abs(fp_error) <= pid->deadband)
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30. This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	/* Round to nearest before dropping the fractional bits */
	result = result + (1 << (FRAC_BITS-1));

	return (signed int)fp_toint(result);
}

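/*
 * Worked example of pid_calc() (illustrative values, not a tuned
 * configuration): with setpoint = 97, deadband = 0, p_gain_pct = 20
 * and zero i/d gains, a busy reading of int_tofp(107) gives
 * fp_error = int_tofp(-10), a P term of about -2 in fixed point, and
 * a rounded return value of -2. Callers subtract this result from the
 * current P state, so a negative output requests a higher P state.
 */
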
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits->turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	u64 value, cap;

	rdmsrl(MSR_HWP_CAPABILITIES, cap);
	hw_min = HWP_LOWEST_PERF(cap);
	hw_max = HWP_HIGHEST_PERF(cap);
	range = hw_max - hw_min;

	for_each_cpu(cpu, cpumask) {
		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = limits->min_perf_pct * range / 100;
		min = hw_min + adj_range;
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = limits->max_perf_pct * range / 100;
		max = hw_min + adj_range;
		if (limits->no_turbo) {
			hw_max = HWP_GUARANTEED_PERF(cap);
			if (hw_max < max)
				max = hw_max;
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}
}

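/*
 * Illustrative mapping (made-up capability values): if HWP reports
 * hw_min = 1 and hw_max = 35, then range = 34. A min_perf_pct of 50
 * yields adj_range = 17, so HWP_MIN_PERF becomes 1 + 17 = 18, while
 * a max_perf_pct of 100 leaves HWP_MAX_PERF at 35.
 */
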
static void intel_pstate_hwp_set_online_cpus(void)
{
	get_online_cpus();
	intel_pstate_hwp_set(cpu_online_mask);
	put_online_cpus();
}

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active)
		return;

	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}
/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}

static ssize_t show_turbo_pct(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	int32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}

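/*
 * Illustrative turbo_pct arithmetic (made-up ratios): with
 * min_pstate = 8, max_pstate = 28 and turbo_pstate = 35 there are 28
 * P states in total, 21 of them non-turbo, so this attribute reports
 * 100 - 75 = 25 percent of the range as turbo.
 */
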
static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits->turbo_disabled)
		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits->no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_turbo_state();
	if (limits->turbo_disabled) {
		pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

	limits->no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable HWP notification interrupt as we don't process them */
	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}

static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_TURBO_RATIOS, value);
	return value & 0x7F;
}

static void atom_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	val |= vid;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}

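/*
 * Illustrative VID interpolation (made-up calibration values): with
 * vid.min = int_tofp(20), vid.max = int_tofp(54) and a 17-step
 * P state range, vid.ratio is 2 per step, so min_pstate + 5 maps to
 * a VID of 30 before clamping; turbo P states use vid.turbo instead.
 */
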
static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}

static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(ATOM_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			cpudata->pstate.min_pstate));

	rdmsrl(ATOM_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

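/*
 * MSR_PLATFORM_INFO layout used above (per the Intel SDM): bits 15:8
 * hold the maximum non-turbo ratio and bits 47:40 the maximum
 * efficiency (minimum) ratio, hence the shifts by 8 and 40.
 */
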
static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		/* Do some sanity checking for safety */
		if (plat_info & 0x600000000) {
			u64 tdp_ctrl;
			u64 tdp_ratio;
			int tdp_msr;

			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
			if (err)
				goto skip_tar;

			tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
			if (err)
				goto skip_tar;

			if (tdp_ratio - 1 == tar) {
				max_pstate = tar;
				pr_debug("max_pstate=TAC %x\n", max_pstate);
			} else {
				goto skip_tar;
			}
		}
	}

skip_tar:
	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = value & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	wrmsrl(MSR_IA32_PERF_CTL, val);
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static struct cpu_defaults silvermont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.set = atom_set_pstate,
		.get_scaling = silvermont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static struct cpu_defaults airmont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.set = atom_set_pstate,
		.get_scaling = airmont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits->no_turbo || limits->turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	max_perf_adj = fp_toint(max_perf * limits->max_perf);
	*max = clamp_t(int, max_perf_adj,
		       cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(max_perf * limits->min_perf);
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}

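/*
 * Illustrative limit math (made-up values): with turbo allowed,
 * turbo_pstate = 35 and max_perf = div_fp(int_tofp(75), int_tofp(100))
 * == 192, max_perf_adj is fp_toint(35 * 192) == 26, which is then
 * clamped to [min_pstate, turbo_pstate]. Note that min_perf scales
 * off the same max_perf ceiling, not off min_pstate.
 */
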
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
{
	int max_perf, min_perf;

	if (force) {
		update_turbo_state();

		intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

		pstate = clamp_t(int, pstate, min_perf, max_perf);

		if (pstate == cpu->pstate.current_pstate)
			return;
	}
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int64_t core_pct;

	core_pct = int_tofp(sample->aperf) * int_tofp(100);
	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));

	sample->core_pct_busy = (int32_t)core_pct;
}

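/*
 * Illustrative sample (made-up counter deltas): if APERF advanced by
 * 1,500,000 and MPERF by 2,000,000 over the same window, the core ran
 * at 75% of its guaranteed (non-turbo) frequency while unhalted, so
 * core_pct_busy holds int_tofp(75).
 */
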
static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();
	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = time;
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;

	return true;
}

static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
	return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf *
		cpu->pstate.scaling, cpu->sample.mperf);
}

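/*
 * Illustrative average frequency (made-up values): with
 * max_pstate_physical = 28 and scaling = 100000 (a 2.8 GHz ceiling),
 * an APERF/MPERF ratio of 1/2 reports 1400000 kHz.
 */
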
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	u64 cummulative_iowait, delta_iowait_us;
	u64 delta_iowait_mperf;
	u64 mperf, now;
	int32_t cpu_load;

	cummulative_iowait = get_cpu_iowait_time_us(cpu->cpu, &now);

	/*
	 * Convert iowait time into number of IO cycles spent at max_freq.
	 * IO is considered as busy only for the cpu_load algorithm. For
	 * performance this is not needed since we always try to reach the
	 * maximum P-State, so we are already boosting the IOs.
	 */
	delta_iowait_us = cummulative_iowait - cpu->prev_cummulative_iowait;
	delta_iowait_mperf = div64_u64(delta_iowait_us * cpu->pstate.scaling *
		cpu->pstate.max_pstate, MSEC_PER_SEC);

	mperf = cpu->sample.mperf + delta_iowait_mperf;
	cpu->prev_cummulative_iowait = cummulative_iowait;

	/*
	 * The load can be estimated as the ratio of the mperf counter
	 * running at a constant frequency during active periods
	 * (C0) and the time stamp counter running at the same frequency
	 * also during C-states.
	 */
	cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc);
	cpu->sample.busy_scaled = cpu_load;

	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, cpu_load);
}

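/*
 * Illustrative iowait credit (made-up values): 1000 us of iowait with
 * scaling = 100000 and max_pstate = 24 adds
 * 1000 * 100000 * 24 / MSEC_PER_SEC = 2400000 extra "busy" mperf
 * cycles, biasing the cpu_load estimate upward while I/O is pending.
 */
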
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	u64 duration_ns;

	intel_pstate_calc_busy(cpu);

	/*
	 * core_busy is the ratio of actual performance to max
	 * max_pstate is the max non turbo pstate available
	 * current_pstate was the pstate that was requested during
	 *	the last sample period.
	 *
	 * We normalize core_busy, which was our actual percent
	 * performance to what we requested during the last sample
	 * period. The result will be a percentage of busy at a
	 * specified pstate.
	 */
	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	/*
	 * Since our utilization update callback will not run unless we are
	 * in C0, check if the actual elapsed time is significantly greater (3x)
	 * than our sample interval. If it is, then we were idle for a long
	 * enough period of time to adjust our busyness.
	 */
	duration_ns = cpu->sample.time - cpu->last_sample_time;
	if ((s64)duration_ns > pid_params.sample_rate_ns * 3
	    && cpu->last_sample_time > 0) {
		sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
				      int_tofp(duration_ns));
		core_busy = mul_fp(core_busy, sample_ratio);
	}

	cpu->sample.busy_scaled = core_busy;
	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
}

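/*
 * Illustrative idle rescaling (made-up timing): with a 10 ms sample
 * interval, a 40 ms gap between samples exceeds the 3x threshold, so
 * core_busy is scaled by 10/40 and the PID sees only a quarter of
 * the measured busyness.
 */
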
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int from, target_pstate;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	target_pstate = pstate_funcs.get_target_pstate(cpu);

	intel_pstate_set_pstate(cpu, target_pstate, true);

	sample = &cpu->sample;
	trace_pstate_sample(fp_toint(sample->core_pct_busy),
			    fp_toint(sample->busy_scaled),
			    from,
			    cpu->pstate.current_pstate,
			    sample->mperf,
			    sample->aperf,
			    sample->tsc,
			    get_avg_frequency(cpu));
}

static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned long util, unsigned long max)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns = time - cpu->sample.time;

	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
		bool sample_taken = intel_pstate_sample(cpu, time);

		if (sample_taken && !hwp_active)
			intel_pstate_adjust_busy_pstate(cpu);
	}
}

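/*
 * The scheduler calls this on every utilization update, but a new
 * sample is taken at most once per sample_rate_ns window (10 ms with
 * the defaults above, 50 ms when HWP is active), so pid_calc() runs
 * at a roughly fixed cadence while the CPU remains in C0.
 */
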
#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, silvermont_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x47, core_params),
	ICPU(0x4c, airmont_params),
	ICPU(0x4e, core_params),
	ICPU(0x4f, core_params),
	ICPU(0x5e, core_params),
	ICPU(0x56, core_params),
	ICPU(0x57, knl_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
	ICPU(0x56, core_params),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	if (!all_cpu_data[cpunum])
		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
					       GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active) {
		intel_pstate_hwp_enable(cpu);
		pid_params.sample_rate_ms = 50;
		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
	}

	intel_pstate_get_cpu_pstates(cpu);

	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu, 0);

	cpu->update_util.func = intel_pstate_update_util;
	cpufreq_set_update_util_data(cpunum, &cpu->update_util);

	pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->sample;
	return get_avg_frequency(cpu);
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
	    policy->max >= policy->cpuinfo.max_freq) {
		pr_debug("intel_pstate: set performance\n");
		limits = &performance_limits;
		if (hwp_active)
			intel_pstate_hwp_set(policy->cpus);
		return 0;
	}

	pr_debug("intel_pstate: set powersave\n");
	limits = &powersave_limits;
	limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0, 100);
	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
					      policy->cpuinfo.max_freq);
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
				  int_tofp(100));
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set(policy->cpus);

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);

	cpufreq_set_update_util_data(cpu_num, NULL);
	synchronize_sched();

	if (hwp_active)
		return;

	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->cpuinfo.max_freq =
		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

static int __initdata no_load;
static int __initdata no_hwp;
static int __initdata hwp_only;
static unsigned int force_load;

static int intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.set = funcs->set;
	pstate_funcs.get_vid = funcs->get_vid;
	pstate_funcs.get_target_pstate = funcs->get_target_pstate;
}

#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int  oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};

static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
			     ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}

#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
	{}
};

static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
		copy_cpu_funcs(&core_params.funcs);
		hwp_active++;
		goto hwp_cpu_matched;
	}

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (!hwp_active && hwp_only)
		goto out;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	if (hwp_active)
		pr_info("intel_pstate: HWP enabled\n");

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			cpufreq_set_update_util_data(cpu, NULL);
			synchronize_sched();
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp")) {
		pr_info("intel_pstate: HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");