// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif
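
/*
 * Until setup_per_cpu_areas() runs, every CPU uses BOOT_PERCPU_OFFSET, so
 * percpu accesses resolve into the initial percpu section in the kernel
 * image (located at __per_cpu_load on 64-bit).
 */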

DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
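
/*
 * __per_cpu_offset[cpu] is what per_cpu_offset(cpu) returns: the delta
 * added to a percpu variable's link-time address to reach that CPU's copy.
 */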

/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif
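
/*
 * Example of what the reserve buys us: a module doing
 *
 *	static DEFINE_PER_CPU(int, mod_counter);
 *
 * has mod_counter carved out of the reserved region of the first chunk,
 * keeping it within 32-bit displacement of the percpu segment base.
 * (mod_counter is an illustrative name, not a real kernel symbol.)
 */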

#ifdef CONFIG_X86_32
/**
 * pcpu_need_numa - determine percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}
#endif
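
/*
 * Note: pcpu_need_numa() is only built on 32-bit; setup_per_cpu_areas()
 * below uses it there to switch from the embed to the page first chunk
 * allocator when multiple NUMA nodes are in play.
 */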

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	/* allocate above MAX_DMA_ADDRESS so the DMA zone is not depleted */
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = memblock_alloc_from_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = memblock_alloc_try_nid_nopanic(size, align, goal,
						     BOOTMEM_ALLOC_ACCESSIBLE,
						     node);

		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
			 cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return memblock_alloc_from_nopanic(size, align, goal);
#endif
}

/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return pcpu_alloc_bootmem(cpu, size, align);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
#else
	return LOCAL_DISTANCE;
#endif
}
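
/*
 * pcpu_cpu_distance() above is the cpu-distance callback handed to
 * pcpu_embed_first_chunk() below, letting the percpu allocator group
 * CPUs on the same NUMA node into shared units.
 */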

static void __init pcpup_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}
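
/*
 * pcpup_populate_pte() is the page-table populate callback handed to
 * pcpu_page_first_chunk() below for the page-granular fallback mapping.
 */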

static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct d = GDT_ENTRY_INIT(0x8092, per_cpu_offset(cpu),
					      0xFFFFF);

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
#endif
}
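
/*
 * Only 32-bit installs a GDT entry here: x86_32 reaches percpu data through
 * a segment register pointing at per_cpu_offset(cpu), while x86_64 programs
 * the GS base via MSR, so setup_percpu_segment() is a no-op on 64-bit.
 */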

void __init setup_per_cpu_areas(void)
{
	unsigned int cpu;
	unsigned long delta;
	int rc;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  Embedding allocator is our favorite;
	 * however, on NUMA configurations, it can result in very
	 * sparse unit mapping and vmalloc area isn't spacious enough
	 * on 32bit.  Use page in that case.
	 */
#ifdef CONFIG_X86_32
	if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	rc = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		const size_t dyn_size = PERCPU_MODULE_RESERVE +
			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
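		/*
		 * Worked out: on 64-bit PERCPU_FIRST_CHUNK_RESERVE equals
		 * PERCPU_MODULE_RESERVE, so dyn_size is just
		 * PERCPU_DYNAMIC_RESERVE; on 32-bit the reserve is 0, so
		 * module percpu statics come out of the dynamic area and
		 * dyn_size grows by PERCPU_MODULE_RESERVE.
		 */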
		size_t atom_size;

		/*
		 * On 64bit, use PMD_SIZE for atom_size so that embedded
		 * percpu areas are aligned to PMD.  This, in the future,
		 * can also allow using PMD mappings in vmalloc area.  Use
		 * PAGE_SIZE on 32bit as vmalloc space is highly contended
		 * and large vmalloc area allocs can easily fail.
		 */
#ifdef CONFIG_X86_64
		atom_size = PMD_SIZE;
#else
		atom_size = PAGE_SIZE;
#endif
		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					    dyn_size, atom_size,
					    pcpu_cpu_distance,
					    pcpu_fc_alloc, pcpu_fc_free);
		if (rc < 0)
			pr_warning("%s allocator failed (%d), falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					   pcpu_fc_alloc, pcpu_fc_free,
					   pcpup_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
		per_cpu(x86_cpu_to_acpiid, cpu) =
			early_per_cpu_map(x86_cpu_to_acpiid, cpu);
#endif
#ifdef CONFIG_X86_32
		per_cpu(x86_cpu_to_logical_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE;
#endif
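		/*
		 * irq_stack_ptr above points at the *end* of the per-CPU
		 * IRQ stack because x86 stacks grow down.
		 */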
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
		/*
		 * Ensure that the boot cpu numa_node is correct when the boot
		 * cpu is on a node that doesn't have memory installed.
		 * Also cpu_up() will call cpu_to_node() for APs when
		 * MEMORY_HOTPLUG is defined, before per_cpu(numa_node) is set
		 * up later with c_init aka intel_init/amd_init.
		 * So set them all (boot cpu and all APs).
		 */
		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
		/*
		 * Up to this point, the boot CPU has been using .init.data
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (!cpu)
			switch_to_new_gdt(cpu);
	}
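
	/*
	 * Note: only the boot CPU had its GDT reloaded above; each secondary
	 * CPU loads its own GDT and percpu base in cpu_init() at bring-up.
	 */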

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
	early_per_cpu_ptr(x86_cpu_to_acpiid) = NULL;
#endif
#ifdef CONFIG_X86_32
	early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
#endif
#ifdef CONFIG_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();

	/*
	 * Sync back kernel address range again.  We already did this in
	 * setup_arch(), but percpu data also needs to be available in
	 * the smpboot asm.  We can't reliably pick up percpu mappings
	 * using vmalloc_fault(), because exception dispatch needs
	 * percpu data.
	 *
	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
	 * this call?
	 */
	sync_initial_page_table();
}