// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <linux/stackprotector.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>

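/*
 * cpu_number backs smp_processor_id(), and this_cpu_off caches each
 * CPU's percpu base offset for the this_cpu_*() accessors; both are
 * filled in for every possible CPU by setup_per_cpu_areas() below.
 */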
DEFINE_PER_CPU_CACHE_HOT(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

DEFINE_PER_CPU_CACHE_HOT(unsigned long, this_cpu_off);
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

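/*
 * Per-CPU offset table, indexed by CPU number and consumed via
 * per_cpu_offset(). As an illustrative sketch (not the literal macro
 * expansion), a remote percpu access boils down to:
 *
 *	per_cpu(var, cpu) ~= *(typeof(var) *)((unsigned long)&(var) +
 *			     __per_cpu_offset[cpu])
 */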
unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init;
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations. Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base. On x86_32, anything can
 * address anywhere. No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif

#ifdef CONFIG_X86_32
/**
 * pcpu_need_numa - determine percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA. This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NUMA
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}
#endif

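/*
 * Distance callback for pcpu_embed_first_chunk(). A two-level metric
 * (same node vs. different node) is all the percpu allocator needs to
 * group CPUs of the same NUMA node into one allocation unit.
 */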
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NUMA
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
#else
	return LOCAL_DISTANCE;
#endif
}

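/*
 * Node lookup callback for the first-chunk allocators. This runs
 * before the percpu NUMA state is set up, so the early map is used.
 */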
static int __init pcpu_cpu_to_node(int cpu)
{
	return early_cpu_to_node(cpu);
}

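/*
 * PTE-population callback used by pcpu_page_first_chunk() to make sure
 * page tables exist for the vmalloc-area addresses backing the percpu
 * first chunk in page mode.
 */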
void __init pcpu_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

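/*
 * On 32-bit, percpu data is reached through the %fs segment, so each
 * CPU gets a GDT data segment based at its percpu offset. 64-bit
 * reaches percpu data via %gs with a per-CPU base address and needs no
 * GDT entry, hence the empty body there.
 */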
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct d = GDT_ENTRY_INIT(DESC_DATA32,
					      per_cpu_offset(cpu), 0xFFFFF);

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
#endif
}

void __init setup_per_cpu_areas(void)
{
	unsigned int cpu;
	unsigned long delta;
	int rc;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%u\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area. Embedding allocator is our favorite;
	 * however, on NUMA configurations, it can result in very
	 * sparse unit mapping and vmalloc area isn't spacious enough
	 * on 32bit. Use page in that case.
	 */
#ifdef CONFIG_X86_32
	if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	rc = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		const size_t dyn_size = PERCPU_MODULE_RESERVE +
			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
		size_t atom_size;

		/*
		 * On 64bit, use PMD_SIZE for atom_size so that embedded
		 * percpu areas are aligned to PMD. This, in the future,
		 * can also allow using PMD mappings in vmalloc area. Use
		 * PAGE_SIZE on 32bit as vmalloc space is highly contended
		 * and large vmalloc area allocs can easily fail.
		 */
#ifdef CONFIG_X86_64
		atom_size = PMD_SIZE;
#else
		atom_size = PAGE_SIZE;
#endif
		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					    dyn_size, atom_size,
					    pcpu_cpu_distance,
					    pcpu_cpu_to_node);
		if (rc < 0)
			pr_warn("%s allocator failed (%d), falling back to page size\n",
				pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					   pcpu_cpu_to_node);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

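	/*
	 * delta translates a linker address in .data..percpu (relative
	 * to __per_cpu_start) to the freshly allocated first chunk at
	 * pcpu_base_addr; pcpu_unit_offsets[] then picks each CPU's
	 * unit within that chunk.
	 */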
	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas. These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_cpu_to_acpiid, cpu) =
			early_per_cpu_map(x86_cpu_to_acpiid, cpu);
#endif
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
		/*
		 * Ensure that the boot cpu numa_node is correct when the boot
		 * cpu is on a node that doesn't have memory installed.
		 * Also cpu_up() will call cpu_to_node() for APs when
		 * MEMORY_HOTPLUG is defined, before per_cpu(numa_node) is set
		 * up later with c_init aka intel_init/amd_init.
		 * So set them all (boot cpu and all APs).
		 */
		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
		/*
		 * Up to this point, the boot CPU has been using .init.data
		 * area. Reload any changed state for the boot CPU.
		 */
		if (!cpu)
			switch_gdt_and_percpu_base(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_cpu_to_acpiid) = NULL;
#endif
#ifdef CONFIG_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();

	/*
	 * Sync back kernel address range again. We already did this in
	 * setup_arch(), but percpu data also needs to be available in
	 * the smpboot asm and arch_sync_kernel_mappings() doesn't sync to
	 * swapper_pg_dir on 32-bit. The per-cpu mappings need to be
	 * available there too.
	 *
	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
	 * this call?
	 */
	sync_initial_page_table();
}