// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>

DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;

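/*
 * Record, for each CPU in @cpus, the current frequency as a fraction of
 * the maximum: (cur_freq / max_freq) << SCHED_CAPACITY_SHIFT. The
 * scheduler reads freq_scale to keep load tracking invariant across
 * frequency changes.
 */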
void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
                         unsigned long max_freq)
{
        unsigned long scale;
        int i;

        scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

        for_each_cpu(i, cpus)
                per_cpu(freq_scale, i) = scale;
}

static DEFINE_MUTEX(cpu_scale_mutex);
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

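/*
 * Record the normalized capacity of @cpu. Updates made from this file
 * are serialized by cpu_scale_mutex.
 */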
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
        per_cpu(cpu_scale, cpu) = capacity;
}

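/* Report the current normalized capacity of this CPU via sysfs. */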
static ssize_t cpu_capacity_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);

        return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

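/*
 * Writing cpu_capacity applies the new value, bounded by
 * SCHED_CAPACITY_SCALE, to all core siblings of this CPU and then
 * queues a sched_domain rebuild so the change takes effect.
 */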
static ssize_t cpu_capacity_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf,
                                  size_t count)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        int this_cpu = cpu->dev.id;
        int i;
        unsigned long new_capacity;
        ssize_t ret;

        if (!count)
                return 0;

        ret = kstrtoul(buf, 0, &new_capacity);
        if (ret)
                return ret;
        if (new_capacity > SCHED_CAPACITY_SCALE)
                return -EINVAL;

        mutex_lock(&cpu_scale_mutex);
        for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
                topology_set_cpu_scale(i, new_capacity);
        mutex_unlock(&cpu_scale_mutex);

        schedule_work(&update_topology_flags_work);

        return count;
}

static DEVICE_ATTR_RW(cpu_capacity);

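/*
 * Despite the "sysctl" name, this creates the cpu_capacity sysfs
 * attribute on every possible CPU device.
 */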
static int register_cpu_capacity_sysctl(void)
{
        int i;
        struct device *cpu;

        for_each_possible_cpu(i) {
                cpu = get_cpu_device(i);
                if (!cpu) {
                        pr_err("%s: too early to get CPU%d device!\n",
                               __func__, i);
                        continue;
                }
                device_create_file(cpu, &dev_attr_cpu_capacity);
        }

        return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

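/*
 * Set while update_topology_flags_workfn() rebuilds the sched_domains;
 * the scheduler queries it through the arch_update_cpu_topology() hook
 * on architectures that wire the hook up, so topology flags are
 * re-evaluated during the rebuild.
 */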
static int update_topology;

int topology_update_cpu_topology(void)
{
        return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
        update_topology = 1;
        rebuild_sched_domains();
        pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
        update_topology = 0;
}

static u32 capacity_scale;
static u32 *raw_capacity;

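/*
 * Discard the temporary raw capacity table. Returns 0 so it can also
 * serve as an initcall when CONFIG_CPU_FREQ is disabled (see the #else
 * branch at the bottom of this file).
 */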
static int free_raw_capacity(void)
{
        kfree(raw_capacity);
        raw_capacity = NULL;

        return 0;
}

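/*
 * Rescale the raw capacities against the largest value seen so far, so
 * the biggest CPU ends up with capacity SCHED_CAPACITY_SCALE (1024) and
 * the rest are expressed relative to it.
 */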
void topology_normalize_cpu_scale(void)
{
        u64 capacity;
        int cpu;

        if (!raw_capacity)
                return;

        pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
        mutex_lock(&cpu_scale_mutex);
        for_each_possible_cpu(cpu) {
                pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
                         cpu, raw_capacity[cpu]);
                capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
                        / capacity_scale;
                topology_set_cpu_scale(cpu, capacity);
                pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
                         cpu, topology_get_cpu_scale(NULL, cpu));
        }
        mutex_unlock(&cpu_scale_mutex);
}

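/*
 * Parse the optional "capacity-dmips-mhz" DT property of @cpu_node into
 * raw_capacity[cpu]. Returns true if a value was read. Any CPU missing
 * the property invalidates the whole table: partial data would skew the
 * normalization, so everyone falls back to the default 1024.
 */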
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
        static bool cap_parsing_failed;
        int ret;
        u32 cpu_capacity;

        if (cap_parsing_failed)
                return false;

        ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
                                   &cpu_capacity);
        if (!ret) {
                if (!raw_capacity) {
                        raw_capacity = kcalloc(num_possible_cpus(),
                                               sizeof(*raw_capacity),
                                               GFP_KERNEL);
                        if (!raw_capacity) {
                                pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
                                cap_parsing_failed = true;
                                return false;
                        }
                }
                capacity_scale = max(cpu_capacity, capacity_scale);
                raw_capacity[cpu] = cpu_capacity;
                pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
                         cpu_node, raw_capacity[cpu]);
        } else {
                if (raw_capacity) {
                        pr_err("cpu_capacity: missing %pOF raw capacity\n",
                               cpu_node);
                        pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
                }
                cap_parsing_failed = true;
                free_raw_capacity();
        }

        return !ret;
}

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

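/*
 * cpufreq policy notifier: as each policy comes up, weight the capacity
 * of its CPUs by cpuinfo.max_freq so CPUs with different maximum
 * frequencies become comparable. Once every possible CPU has been
 * visited, renormalize and tear the parsing machinery down.
 */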
static int
init_cpu_capacity_callback(struct notifier_block *nb,
                           unsigned long val,
                           void *data)
{
        struct cpufreq_policy *policy = data;
        int cpu;

        if (!raw_capacity)
                return 0;

        if (val != CPUFREQ_NOTIFY)
                return 0;

        pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
                 cpumask_pr_args(policy->related_cpus),
                 cpumask_pr_args(cpus_to_visit));

        cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

        for_each_cpu(cpu, policy->related_cpus) {
                raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
                                    policy->cpuinfo.max_freq / 1000UL;
                capacity_scale = max(raw_capacity[cpu], capacity_scale);
        }

        if (cpumask_empty(cpus_to_visit)) {
                topology_normalize_cpu_scale();
                schedule_work(&update_topology_flags_work);
                free_raw_capacity();
                pr_debug("cpu_capacity: parsing done\n");
                schedule_work(&parsing_done_work);
        }

        return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
        .notifier_call = init_cpu_capacity_callback,
};

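/*
 * Register the policy notifier early (core_initcall) so it is in place
 * before cpufreq drivers start creating policies.
 */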
static int __init register_cpufreq_notifier(void)
{
        int ret;

        /*
         * On ACPI-based systems we need to use the default cpu capacity
         * until we have the necessary code to parse the cpu capacity, so
         * skip registering the cpufreq notifier.
         */
        if (!acpi_disabled || !raw_capacity)
                return -EINVAL;

        if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
                pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
                return -ENOMEM;
        }

        cpumask_copy(cpus_to_visit, cpu_possible_mask);

        ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
                                        CPUFREQ_POLICY_NOTIFIER);

        if (ret)
                free_cpumask_var(cpus_to_visit);

        return ret;
}
core_initcall(register_cpufreq_notifier);

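/*
 * One-shot cleanup, queued by init_cpu_capacity_callback() once every
 * CPU has been visited: unregistering the notifier from within its own
 * callback would be unsafe, so it is done from a workqueue instead.
 */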
static void parsing_done_workfn(struct work_struct *work)
{
        cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
                                    CPUFREQ_POLICY_NOTIFIER);
        free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif