// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_thermal.c - Passive cooling submodule of the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <linux/uaccess.h>

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS	"processor"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_thermal");

#ifdef CONFIG_CPU_FREQ

/*
 * If a passive cooling situation is detected, CPUfreq is used primarily, as
 * it offers (in most cases) voltage scaling in addition to frequency scaling
 * and thus a cubic (instead of linear) reduction of energy. Also, we allow
 * for _any_ cpufreq driver, not only the acpi-cpufreq driver.
 */

#define CPUFREQ_THERMAL_MIN_STEP 0
#define CPUFREQ_THERMAL_MAX_STEP 3
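/*
 * Each step removes another 20% of cpuinfo.max_freq (see the max_freq
 * computation in cpufreq_set_cur_state() below), so the four cooling steps
 * cap the CPU at 100%, 80%, 60% and 40% of its maximum frequency.
 */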

static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);

#define reduction_pctg(cpu) \
	per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))

/*
 * Emulate "per package data" using per-CPU data (which should really be
 * provided elsewhere).
 *
 * Note that we can lose a CPU on CPU hot-unplug; in that case we forget its
 * state temporarily. Fortunately that's not a big issue here (I hope).
 */
static int phys_package_first_cpu(int cpu)
{
	int i;
	int id = topology_physical_package_id(cpu);

	for_each_online_cpu(i)
		if (topology_physical_package_id(i) == id)
			return i;
	return 0;
}

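/*
 * Passive cooling via cpufreq is only usable if some cpufreq driver (any
 * driver, not just acpi-cpufreq) has published a policy for this CPU.
 */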
static int cpu_has_cpufreq(unsigned int cpu)
{
	struct cpufreq_policy policy;

	if (!acpi_processor_cpufreq_init || cpufreq_get_policy(&policy, cpu))
		return 0;

	return 1;
}

static int cpufreq_get_max_state(unsigned int cpu)
{
	if (!cpu_has_cpufreq(cpu))
		return 0;

	return CPUFREQ_THERMAL_MAX_STEP;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
	if (!cpu_has_cpufreq(cpu))
		return 0;

	return reduction_pctg(cpu);
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	struct cpufreq_policy *policy;
	struct acpi_processor *pr;
	unsigned long max_freq;
	int i, ret;

	if (!cpu_has_cpufreq(cpu))
		return 0;

	reduction_pctg(cpu) = state;

	/*
	 * Update all the CPUs in the same package because they all
	 * contribute to the temperature and often share the same
	 * frequency.
	 */
	for_each_online_cpu(i) {
		if (topology_physical_package_id(i) !=
		    topology_physical_package_id(cpu))
			continue;

		pr = per_cpu(processors, i);

		if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
			continue;

		policy = cpufreq_cpu_get(i);
		if (!policy)
			return -EINVAL;

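		/*
		 * Each cooling step removes another 20% of the CPU's maximum
		 * frequency. For example (hypothetical numbers), with
		 * cpuinfo.max_freq = 2000000 kHz, state 3 yields
		 * 2000000 * (100 - 3 * 20) / 100 = 800000 kHz.
		 */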
		max_freq = (policy->cpuinfo.max_freq * (100 - reduction_pctg(i) * 20)) / 100;

		cpufreq_cpu_put(policy);

		ret = freq_qos_update_request(&pr->thermal_req, max_freq);
		if (ret < 0) {
			pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
				pr->id, ret);
		}
	}
	return 0;
}

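/*
 * Attach a FREQ_QOS_MAX request to the given cpufreq policy so that passive
 * cooling can cap the frequency later on. The request starts out at INT_MAX,
 * i.e. it does not constrain the policy until cpufreq_set_cur_state()
 * updates it.
 */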
void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
{
	int cpu = policy->cpu;
	struct acpi_processor *pr = per_cpu(processors, cpu);
	int ret;

	if (!pr)
		return;

	ret = freq_qos_add_request(&policy->constraints, &pr->thermal_req,
				   FREQ_QOS_MAX, INT_MAX);
	if (ret < 0)
		pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
		       ret);
}

void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct acpi_processor *pr = per_cpu(processors, policy->cpu);

	if (pr)
		freq_qos_remove_request(&pr->thermal_req);
}
#else				/* ! CONFIG_CPU_FREQ */
static int cpufreq_get_max_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	return 0;
}

#endif

/* thermal cooling device callbacks */
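/*
 * The cooling device exposes a single state space that combines both
 * mechanisms: states 0..cpufreq_get_max_state() are handled by lowering the
 * cpufreq limit, and any states above that are mapped onto ACPI T-state
 * throttling (see processor_set_cur_state()).
 */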
static int acpi_processor_max_state(struct acpi_processor *pr)
{
	int max_state = 0;

	/*
	 * There are four cooling states provided by cpufreq (per
	 * cpufreq_thermal_reduction_pctg): 0, 1, 2, 3.
	 */
	max_state += cpufreq_get_max_state(pr->id);
	if (pr->flags.throttling)
		max_state += (pr->throttling.state_count - 1);

	return max_state;
}

static int
processor_get_max_state(struct thermal_cooling_device *cdev,
			unsigned long *state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	*state = acpi_processor_max_state(pr);
	return 0;
}

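/*
 * The current cooling state is the cpufreq reduction step plus the current
 * T-state, mirroring the layout used by acpi_processor_max_state().
 */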
static int
processor_get_cur_state(struct thermal_cooling_device *cdev,
			unsigned long *cur_state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	*cur_state = cpufreq_get_cur_state(pr->id);
	if (pr->flags.throttling)
		*cur_state += pr->throttling.state;
	return 0;
}

static int
processor_set_cur_state(struct thermal_cooling_device *cdev,
			unsigned long state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;
	int result = 0;
	int max_pstate;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	max_pstate = cpufreq_get_max_state(pr->id);

	if (state > acpi_processor_max_state(pr))
		return -EINVAL;

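	/*
	 * Low states are served by cpufreq alone; anything above max_pstate
	 * keeps cpufreq at its deepest step and throttles the remainder via
	 * ACPI T-states.
	 */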
	if (state <= max_pstate) {
		if (pr->flags.throttling && pr->throttling.state)
			result = acpi_processor_set_throttling(pr, 0, false);
		cpufreq_set_cur_state(pr->id, state);
	} else {
		cpufreq_set_cur_state(pr->id, max_pstate);
		result = acpi_processor_set_throttling(pr,
				state - max_pstate, false);
	}
	return result;
}

const struct thermal_cooling_device_ops processor_cooling_ops = {
	.get_max_state = processor_get_max_state,
	.get_cur_state = processor_get_cur_state,
	.set_cur_state = processor_set_cur_state,
};
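
/*
 * Note: this ops table is registered with the thermal core elsewhere in the
 * ACPI processor driver, roughly along the lines of the (simplified) sketch
 * below, which is why the callbacks above read the ACPI device from
 * cdev->devdata:
 *
 *	cdev = thermal_cooling_device_register("Processor", device,
 *					       &processor_cooling_ops);
 */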