// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

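/*
 * Usage note, stated as an assumption rather than taken from this file:
 * on x86 the fast path of smp_processor_id() is a single percpu load of
 * the variable above, roughly raw_smp_processor_id() ==
 * this_cpu_read(cpu_number).
 */
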
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
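
/*
 * Illustrative sketch of the assumed semantics, not code from this file:
 * per_cpu(var, cpu) resolves to the link-time address of "var" shifted
 * by this CPU's offset, conceptually
 *
 *	ptr = (typeof(var) *)((unsigned long)&var + __per_cpu_offset[cpu]);
 *
 * Until setup_per_cpu_areas() runs, every slot still holds
 * BOOT_PERCPU_OFFSET, so the boot CPU transparently uses the initial
 * percpu section.
 */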

/*
 * On x86_64, symbols referenced from code must be reachable with
 * 32-bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk, which
 * is located at the percpu segment base.  On x86_32 anything can
 * address anywhere; there is no need to reserve space in the first
 * chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif
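
/*
 * Context, stated as an assumption about the generic percpu allocator:
 * static percpu storage for modules is handed out from this reserved
 * region of the first chunk, which keeps module percpu addresses within
 * 32-bit reach of the percpu segment base on x86_64.
 */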

#ifdef CONFIG_X86_32
/**
 * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}
#endif

/**
 * pcpu_alloc_bootmem - NUMA-friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: allocation size in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
			 cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}
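
/*
 * Note on the "goal" above (assumed bootmem convention): it is the
 * preferred minimum address, steering these allocations above
 * MAX_DMA_ADDRESS so scarce ISA DMA memory is not consumed.  The
 * _nopanic variants let the caller fall back gracefully instead of
 * panicking on failure.
 */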

/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return pcpu_alloc_bootmem(cpu, size, align);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
#else
	return LOCAL_DISTANCE;
#endif
}
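
/*
 * Context, stated as an assumption: LOCAL_DISTANCE and REMOTE_DISTANCE
 * are the generic SLIT-style node distances (10 and 20) from
 * <linux/topology.h>; pcpu_embed_first_chunk() uses this callback to
 * group CPUs it considers "close" into the same allocation group.
 */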

static void __init pcpup_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct d = GDT_ENTRY_INIT(0x8092, per_cpu_offset(cpu),
					      0xFFFFF);

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
#endif
}
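
/*
 * Reading the magic numbers above (believed, not re-derived from the
 * descriptor manuals here): 0x8092 encodes a present, ring-0,
 * read/write data segment with page granularity, so the 0xFFFFF limit
 * spans 4 GiB.  On 32-bit the %fs segment register is loaded with this
 * GDT entry, making percpu accesses %fs-relative.
 */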

void __init setup_per_cpu_areas(void)
{
	unsigned int cpu;
	unsigned long delta;
	int rc;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  Embedding allocator is our favorite;
	 * however, on NUMA configurations, it can result in very
	 * sparse unit mapping and the vmalloc area isn't spacious
	 * enough on 32-bit.  Use page mapping in that case.
	 */
#ifdef CONFIG_X86_32
	if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	rc = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		const size_t dyn_size = PERCPU_MODULE_RESERVE +
			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
		size_t atom_size;

		/*
		 * On 64-bit, use PMD_SIZE for atom_size so that embedded
		 * percpu areas are aligned to PMD.  This, in the future,
		 * can also allow using PMD mappings in the vmalloc area.
		 * Use PAGE_SIZE on 32-bit as vmalloc space is highly
		 * contended and large vmalloc area allocations can easily
		 * fail.
		 */
#ifdef CONFIG_X86_64
		atom_size = PMD_SIZE;
#else
		atom_size = PAGE_SIZE;
#endif
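
		/*
		 * Aside (assumed constants, not from this file): with 4K
		 * pages, PMD_SIZE is 2 MiB on x86_64, so embedded percpu
		 * units are carved out of 2 MiB-aligned blocks.
		 */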
		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					    dyn_size, atom_size,
					    pcpu_cpu_distance,
					    pcpu_fc_alloc, pcpu_fc_free);
		if (rc < 0)
			pr_warning("%s allocator failed (%d), falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					   pcpu_fc_alloc, pcpu_fc_free,
					   pcpup_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

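	/*
	 * A worked reading of the offset math below: the percpu section is
	 * linked at __per_cpu_start, but the first chunk actually lives at
	 * pcpu_base_addr.  "delta" rebases link-time percpu addresses onto
	 * the chunk, and pcpu_unit_offsets[cpu] then selects each CPU's
	 * unit within it:
	 *
	 *	runtime_addr = link_addr + delta + pcpu_unit_offsets[cpu]
	 */
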
	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed, indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
		per_cpu(x86_cpu_to_acpiid, cpu) =
			early_per_cpu_map(x86_cpu_to_acpiid, cpu);
#endif
#ifdef CONFIG_X86_32
		per_cpu(x86_cpu_to_logical_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE;
#endif
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
		/*
		 * Ensure that the boot cpu's numa_node is correct when the
		 * boot cpu is on a node that doesn't have memory installed.
		 * Also, cpu_up() calls cpu_to_node() for APs when
		 * MEMORY_HOTPLUG is defined, before per_cpu(numa_node) is
		 * set up later by c_init (i.e. intel_init/amd_init), so set
		 * them all here (boot cpu and all APs).
		 */
		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
		/*
		 * Up to this point, the boot CPU has been using the
		 * .init.data area.  Reload any changed state for the
		 * boot CPU.
		 */
		if (!cpu)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
	early_per_cpu_ptr(x86_cpu_to_acpiid) = NULL;
#endif
#ifdef CONFIG_X86_32
	early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
#endif
#ifdef CONFIG_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();

	/*
	 * Sync back kernel address range again.  We already did this in
	 * setup_arch(), but percpu data also needs to be available in
	 * the smpboot asm.  We can't reliably pick up percpu mappings
	 * using vmalloc_fault(), because exception dispatch needs
	 * percpu data.
	 *
	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
	 * this call?
	 */
	sync_initial_page_table();
}