/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/percpu.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>
void store_cpu_topology(unsigned int cpuid)
{
        struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
        u64 mpidr;

        if (cpuid_topo->package_id != -1)
                goto topology_populated;

        mpidr = read_cpuid_mpidr();

        /* Uniprocessor systems can rely on default topology values */
        if (mpidr & MPIDR_UP_BITMASK)
                return;

        /*
         * This would be the place to create cpu topology based on MPIDR.
         *
         * However, it cannot be trusted to depict the actual topology; some
         * pieces of the architecture enforce an artificial cap on Aff0 values
         * (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an
         * artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up
         * having absolutely no relationship to the actual underlying system
         * topology, and cannot be reasonably used as core / package ID.
         *
         * If the MT bit is set, Aff0 *could* be used to define a thread ID, but
         * we still wouldn't be able to obtain a sane core ID. This means we
         * need to entirely ignore MPIDR for any topology deduction.
         */
        cpuid_topo->thread_id  = -1;
        cpuid_topo->core_id    = cpuid;
        cpuid_topo->package_id = cpu_to_node(cpuid);

        pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
                 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
                 cpuid_topo->thread_id, mpidr);

topology_populated:
        update_siblings_masks(cpuid);
}
#ifdef CONFIG_ACPI
static bool __init acpi_cpu_is_threaded(int cpu)
{
        int is_threaded = acpi_pptt_cpu_is_thread(cpu);

        /*
         * If the PPTT doesn't have thread information, assume a homogeneous
         * machine and return the current CPU's thread state.
         */
        if (is_threaded < 0)
                is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

        return !!is_threaded;
}
/*
 * Propagate the topology information of the processor_topology_node tree to the
 * cpu_topology array.
 */
int __init parse_acpi_topology(void)
{
        int cpu, topology_id;

        if (acpi_disabled)
                return 0;

        for_each_possible_cpu(cpu) {
                int i, cache_id;

                topology_id = find_acpi_cpu_topology(cpu, 0);
                if (topology_id < 0)
                        return topology_id;

                if (acpi_cpu_is_threaded(cpu)) {
                        cpu_topology[cpu].thread_id = topology_id;
                        topology_id = find_acpi_cpu_topology(cpu, 1);
                        cpu_topology[cpu].core_id   = topology_id;
                } else {
                        cpu_topology[cpu].thread_id  = -1;
                        cpu_topology[cpu].core_id    = topology_id;
                }
                topology_id = find_acpi_cpu_topology_package(cpu);
                cpu_topology[cpu].package_id = topology_id;

                i = acpi_find_last_cache_level(cpu);
                if (i > 0) {
                        /*
                         * This is the only part of cpu_topology that has
                         * a direct relationship with the cache topology.
                         */
                        cache_id = find_acpi_cpu_cache_topology(cpu, i);
                        if (cache_id > 0)
                                cpu_topology[cpu].llc_id = cache_id;
                }
        }

        return 0;
}
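/*
 * Illustrative example (not part of the upstream file): on a hypothetical
 * single-package machine whose PPTT describes two threads per core, CPU0 and
 * CPU1 would share core_id and package_id while getting distinct thread_id
 * values; on a non-threaded machine each CPU keeps thread_id == -1 and gets
 * its own core_id from its PPTT node.
 */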
#endif /* CONFIG_ACPI */

#ifdef CONFIG_ARM64_AMU_EXTN

#undef pr_fmt
#define pr_fmt(fmt) "AMU: " fmt
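/*
 * Per-cpu state for AMU-based frequency invariance: the most recently
 * sampled core and constant cycle counter values, the pre-computed
 * counter-to-maximum-frequency ratio, and the mask of CPUs whose counters
 * are used for frequency invariance (FIE).
 */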
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale);
static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
static cpumask_var_t amu_fie_cpus;
/* Initialize counter reference per-cpu variables for the current CPU */
void init_cpu_freq_invariance_counters(void)
{
        this_cpu_write(arch_core_cycles_prev,
                       read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0));
        this_cpu_write(arch_const_cycles_prev,
                       read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0));
}
static int validate_cpu_freq_invariance_counters(int cpu)
{
        u64 max_freq_hz, ratio;

        if (!cpu_has_amu_feat(cpu)) {
                pr_debug("CPU%d: counters are not supported.\n", cpu);
                return -EINVAL;
        }

        if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
                     !per_cpu(arch_core_cycles_prev, cpu))) {
                pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
                return -EINVAL;
        }

        /* Convert maximum frequency from kHz to Hz and validate */
        max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000;
        if (unlikely(!max_freq_hz)) {
                pr_debug("CPU%d: invalid maximum frequency.\n", cpu);
                return -EINVAL;
        }

        /*
         * Pre-compute the fixed ratio between the frequency of the constant
         * counter and the maximum frequency of the CPU.
         *
         *                              const_freq
         * arch_max_freq_scale =   ---------------- * SCHED_CAPACITY_SCALE²
         *                         cpuinfo_max_freq
         *
         * We use a factor of 2 * SCHED_CAPACITY_SHIFT -> SCHED_CAPACITY_SCALE²
         * in order to ensure a good resolution for arch_max_freq_scale for
         * very low arch timer frequencies (down to the kHz range which should
         * be unlikely).
         */
        ratio = (u64)arch_timer_get_rate() << (2 * SCHED_CAPACITY_SHIFT);
        ratio = div64_u64(ratio, max_freq_hz);
        if (!ratio) {
                WARN_ONCE(1, "System timer frequency too low.\n");
                return -EINVAL;
        }

        per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio;

        return 0;
}
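/*
 * Worked example (illustrative values, not taken from this file): with a
 * 100 MHz constant counter (arch timer) and a 2 GHz cpuinfo_max_freq,
 *
 *   arch_max_freq_scale = (100000000 << 20) / 2000000000 = 52428
 *
 * i.e. roughly (const_freq / cpuinfo_max_freq) * SCHED_CAPACITY_SCALE²
 * = 0.05 * 1048576, assuming SCHED_CAPACITY_SHIFT == 10.
 */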
static inline bool
enable_policy_freq_counters(int cpu, cpumask_var_t valid_cpus)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

        if (!policy) {
                pr_debug("CPU%d: No cpufreq policy found.\n", cpu);
                return false;
        }

        if (cpumask_subset(policy->related_cpus, valid_cpus))
                cpumask_or(amu_fie_cpus, policy->related_cpus,
                           amu_fie_cpus);

        cpufreq_cpu_put(policy);

        return true;
}
static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key)
static int __init init_amu_fie(void)
{
        cpumask_var_t valid_cpus;
        bool have_policy = false;
        int ret = 0;
        int cpu;

        if (!zalloc_cpumask_var(&valid_cpus, GFP_KERNEL))
                return -ENOMEM;

        if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto free_valid_mask;
        }

        for_each_present_cpu(cpu) {
                if (validate_cpu_freq_invariance_counters(cpu))
                        continue;
                cpumask_set_cpu(cpu, valid_cpus);
                have_policy |= enable_policy_freq_counters(cpu, valid_cpus);
        }

        /*
         * If we are not restricted by cpufreq policies, we only enable
         * the use of the AMU feature for FIE if all CPUs support AMU.
         * Otherwise, enable_policy_freq_counters has already enabled
         * policy cpus.
         */
        if (!have_policy && cpumask_equal(valid_cpus, cpu_present_mask))
                cpumask_or(amu_fie_cpus, amu_fie_cpus, valid_cpus);

        if (!cpumask_empty(amu_fie_cpus)) {
                pr_info("CPUs[%*pbl]: counters will be used for FIE.",
                        cpumask_pr_args(amu_fie_cpus));
                static_branch_enable(&amu_fie_key);
        }

free_valid_mask:
        free_cpumask_var(valid_cpus);

        return ret;
}
late_initcall_sync(init_amu_fie);
bool arch_freq_counters_available(struct cpumask *cpus)
{
        return amu_freq_invariant() &&
               cpumask_subset(cpus, amu_fie_cpus);
}
void topology_scale_freq_tick(void)
{
        u64 prev_core_cnt, prev_const_cnt;
        u64 core_cnt, const_cnt, scale;
        int cpu = smp_processor_id();

        if (!amu_freq_invariant())
                return;

        if (!cpumask_test_cpu(cpu, amu_fie_cpus))
                return;

        const_cnt = read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0);
        core_cnt = read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0);
        prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
        prev_core_cnt = this_cpu_read(arch_core_cycles_prev);

        if (unlikely(core_cnt <= prev_core_cnt ||
                     const_cnt <= prev_const_cnt))
                goto store_and_exit;

        /*
         *          /\core    arch_max_freq_scale
         * scale =  ------- * --------------------
         *          /\const   SCHED_CAPACITY_SCALE
         *
         * See validate_cpu_freq_invariance_counters() for details on
         * arch_max_freq_scale and the use of SCHED_CAPACITY_SHIFT.
         */
        scale = core_cnt - prev_core_cnt;
        scale *= this_cpu_read(arch_max_freq_scale);
        scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT,
                          const_cnt - prev_const_cnt);

        scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
        this_cpu_write(freq_scale, (unsigned long)scale);

store_and_exit:
        this_cpu_write(arch_core_cycles_prev, core_cnt);
        this_cpu_write(arch_const_cycles_prev, const_cnt);
}
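/*
 * Worked example (illustrative, reusing the hypothetical 100 MHz / 2 GHz
 * values above, i.e. arch_max_freq_scale ~= 52428): a CPU that ran at its
 * maximum frequency over the last tick sees /\core / /\const ~= 20, so
 * scale ~= 20 * 52428 / 1024 ~= 1024 = SCHED_CAPACITY_SCALE; at half the
 * maximum frequency the same computation yields scale ~= 512.
 */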
#endif /* CONFIG_ARM64_AMU_EXTN */