arm: Use common cpu_topology structure and functions.
/*
 * arch/arm/kernel/topology.c
 *
 * Copyright (C) 2011 Linaro Limited.
 * Written by: Vincent Guittot
 *
 * based on arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/arch_topology.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>

/*
 * cpu capacity scale management
 */

/*
 * cpu capacity table
 * This per cpu data structure describes the relative capacity of each core.
 * On a heterogeneous system, cores don't have the same computation capacity
 * and we reflect that difference in the cpu_capacity field so the scheduler
 * can take this difference into account during load balancing. A per cpu
 * structure is preferred because each CPU updates its own cpu_capacity field
 * during load balancing, except for idle cores: one idle core is selected
 * to run rebalance_domains for all idle cores, and the cpu_capacity can be
 * updated during this sequence.
 */

#ifdef CONFIG_OF
struct cpu_efficiency {
        const char *compatible;
        unsigned long efficiency;
};

/*
 * Table of the relative efficiency of each processor.
 * The efficiency value must fit in 20 bits, and the final
 * cpu_scale value must be in the range
 *   0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2
 * in order to return at most 1 when DIV_ROUND_CLOSEST
 * is used to compute the capacity of a CPU.
 * Processors that are not defined in the table
 * use the default SCHED_CAPACITY_SCALE value for cpu_scale.
 */
static const struct cpu_efficiency table_efficiency[] = {
        {"arm,cortex-a15", 3891},
        {"arm,cortex-a7",  2048},
        {NULL, },
};
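
/*
 * The absolute efficiency numbers above are arbitrary; only their ratio
 * matters. 3891/2048 is roughly 1.9, in line with commonly quoted
 * per-MHz performance figures for Cortex-A15 versus Cortex-A7, and both
 * values comfortably fit in the 20 bits (2^20 = 1048576) required by the
 * comment above.
 */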

static unsigned long *__cpu_capacity;
#define cpu_capacity(cpu)        __cpu_capacity[cpu]

static unsigned long middle_capacity = 1;
static bool cap_from_dt = true;

/*
 * Iterate over all CPUs' descriptors in the DT and compute the efficiency
 * (as per table_efficiency). Also calculate a middle efficiency,
 * as close as possible to (max{eff_i} + min{eff_i}) / 2.
 * This is later used to scale the cpu_capacity field such that an
 * 'average' CPU is of middle capacity. Also see the comments near
 * table_efficiency[] and update_cpu_capacity().
 */
static void __init parse_dt_topology(void)
{
        const struct cpu_efficiency *cpu_eff;
        struct device_node *cn = NULL;
        unsigned long min_capacity = ULONG_MAX;
        unsigned long max_capacity = 0;
        unsigned long capacity = 0;
        int cpu = 0;

        __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
                                 GFP_NOWAIT);

        for_each_possible_cpu(cpu) {
                const u32 *rate;
                int len;

                /* too early to use cpu->of_node */
                cn = of_get_cpu_node(cpu, NULL);
                if (!cn) {
                        pr_err("missing device node for CPU %d\n", cpu);
                        continue;
                }

                if (topology_parse_cpu_capacity(cn, cpu)) {
                        of_node_put(cn);
                        continue;
                }

                cap_from_dt = false;

                for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
                        if (of_device_is_compatible(cn, cpu_eff->compatible))
                                break;

                if (cpu_eff->compatible == NULL)
                        continue;

                rate = of_get_property(cn, "clock-frequency", &len);
                if (!rate || len != 4) {
                        pr_err("%pOF missing clock-frequency property\n", cn);
                        continue;
                }

                capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;

                /* Save min capacity of the system */
                if (capacity < min_capacity)
                        min_capacity = capacity;

                /* Save max capacity of the system */
                if (capacity > max_capacity)
                        max_capacity = capacity;

                cpu_capacity(cpu) = capacity;
        }

        /* If min and max capacities are equal, we bypass the update of the
         * cpu_scale because all CPUs have the same capacity. Otherwise, we
         * compute a middle_capacity factor that will ensure that the capacity
         * of an 'average' CPU of the system will be as close as possible to
         * SCHED_CAPACITY_SCALE, which is the default value, but with the
         * constraint explained near table_efficiency[].
         */
        if (4*max_capacity < (3*(max_capacity + min_capacity)))
                middle_capacity = (min_capacity + max_capacity)
                                >> (SCHED_CAPACITY_SHIFT+1);
        else
                middle_capacity = ((max_capacity / 3)
                                >> (SCHED_CAPACITY_SHIFT-1)) + 1;

        if (cap_from_dt)
                topology_normalize_cpu_scale();
}
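
/*
 * Worked example, for a hypothetical big.LITTLE device tree whose cpu
 * nodes carry no capacity-dmips-mhz property (so cap_from_dt goes false
 * and the legacy efficiency path above is taken):
 *
 *        cpu@0 {
 *                compatible = "arm,cortex-a15";
 *                clock-frequency = <1600000000>;
 *        };
 *        cpu@100 {
 *                compatible = "arm,cortex-a7";
 *                clock-frequency = <1000000000>;
 *        };
 *
 * A15: capacity = (1600000000 >> 20) * 3891 = 1525 * 3891 = 5933775
 * A7:  capacity = (1000000000 >> 20) * 2048 =  953 * 2048 = 1951744
 *
 * Here 4*max_capacity (23735100) exceeds 3*(max + min) (23656557), so
 * the else branch applies:
 *
 *        middle_capacity = ((5933775 / 3) >> 9) + 1 = 3863 + 1 = 3864
 */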

/*
 * Look for a custom capacity for a CPU in the cpu_capacity table during
 * boot. The update of all CPUs is O(n^2) for a heterogeneous system, but
 * the function returns immediately for homogeneous (SMP) systems.
 */
static void update_cpu_capacity(unsigned int cpu)
{
        if (!cpu_capacity(cpu) || cap_from_dt)
                return;

        topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity);

        pr_info("CPU%u: update cpu_capacity %lu\n",
                cpu, topology_get_cpu_scale(cpu));
}
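
/*
 * Continuing the hypothetical example above, update_cpu_capacity() would
 * set cpu_scale to 5933775 / 3864 = 1535 on an A15 and 1951744 / 3864
 * = 505 on an A7, logging lines of the form "CPU0: update cpu_capacity
 * 1535". The midpoint, (1535 + 505) / 2 = 1020, lands close to
 * SCHED_CAPACITY_SCALE (1024), as intended by parse_dt_topology().
 */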

#else
static inline void parse_dt_topology(void) {}
static inline void update_cpu_capacity(unsigned int cpuid) {}
#endif

/*
 * The current assumption is that we can power gate each core independently.
 * This will be superseded by DT binding once available.
 */
const struct cpumask *cpu_corepower_mask(int cpu)
{
        return &cpu_topology[cpu].thread_sibling;
}
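
/*
 * Note that thread_sibling, not core_sibling, is returned: on a non-SMT
 * system each CPU's thread_sibling mask contains only itself, so every
 * core ends up in its own power domain, matching the assumption above.
 */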

/*
 * store_cpu_topology is called at boot when only one cpu is running,
 * and with the mutex cpu_hotplug.lock locked when several cpus have
 * booted, which prevents simultaneous write access to the cpu_topology
 * array.
 */
void store_cpu_topology(unsigned int cpuid)
{
        struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
        unsigned int mpidr;

        /* If the cpu topology has already been set, just return */
        if (cpuid_topo->core_id != -1)
                return;

        mpidr = read_cpuid_mpidr();

        /* create cpu topology mapping */
        if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
                /*
                 * This is a multiprocessor system:
                 * the multiprocessor format & multiprocessor mode field are set.
                 */

                if (mpidr & MPIDR_MT_BITMASK) {
                        /* core performance interdependency */
                        cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                        cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
                        cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
                } else {
                        /* largely independent cores */
                        cpuid_topo->thread_id = -1;
                        cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                        cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
                }
        } else {
                /*
                 * This is a uniprocessor system:
                 * we are in multiprocessor format but on a uniprocessor system,
                 * or in the old uniprocessor format.
                 */
                cpuid_topo->thread_id = -1;
                cpuid_topo->core_id = 0;
                cpuid_topo->package_id = -1;
        }

        update_siblings_masks(cpuid);

        update_cpu_capacity(cpuid);

        pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
                cpuid, cpu_topology[cpuid].thread_id,
                cpu_topology[cpuid].core_id,
                cpu_topology[cpuid].package_id, mpidr);
}
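
/*
 * Example decode, assuming the MPIDR field layout from <asm/cputype.h>:
 * for mpidr == 0x80000102, bits [31:30] are 0b10 (multiprocessor format,
 * U bit clear) and the MT bit (24) is clear, so the "largely independent
 * cores" branch applies:
 *
 *        thread_id  = -1
 *        core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0) = 0x02
 *        package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) = 0x01
 *
 * i.e. core 2 of cluster 1.
 */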

static inline int cpu_corepower_flags(void)
{
        return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
}

static struct sched_domain_topology_level arm_topology[] = {
#ifdef CONFIG_SCHED_MC
        { cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
        { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
        { cpu_cpu_mask, SD_INIT_NAME(DIE) },
        { NULL, },
};
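
/*
 * With CONFIG_SCHED_MC this gives the scheduler three levels: GMC groups
 * CPUs sharing a power domain (per cpu_corepower_mask above), MC groups
 * the cores of a cluster, and DIE spans all CPUs.
 */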

/*
 * init_cpu_topology is called at boot when only one cpu is running,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void __init init_cpu_topology(void)
{
        reset_cpu_topology();
        smp_wmb();

        parse_dt_topology();

        /* Set scheduler topology descriptor */
        set_sched_topology(arm_topology);
}
c9018aab 271}