// SPDX-License-Identifier: GPL-2.0

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/irq_work.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#ifdef CONFIG_CPU_HAS_FPU
#include <abi/fpu.h>
#endif

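/*
 * Each CPU has a per-CPU word of pending IPI bits, one bit per
 * ipi_message_type.  Senders set bits here and then raise the
 * hardware IPI; the receiver drains the word in handle_ipi().
 */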
struct ipi_data_struct {
	unsigned long bits ____cacheline_aligned;
};
static DEFINE_PER_CPU(struct ipi_data_struct, ipi_data);

enum ipi_message_type {
	IPI_EMPTY,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_IRQ_WORK,
	IPI_MAX
};

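/*
 * Per-CPU IPI handler: atomically fetch and clear this CPU's pending
 * bits, dispatch each requested operation, and loop until no new bits
 * have been set in the meantime.
 */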
static irqreturn_t handle_ipi(int irq, void *dev)
{
	while (true) {
		unsigned long ops;

		ops = xchg(&this_cpu_ptr(&ipi_data)->bits, 0);
		if (ops == 0)
			return IRQ_HANDLED;

		if (ops & (1 << IPI_RESCHEDULE))
			scheduler_ipi();

		if (ops & (1 << IPI_CALL_FUNC))
			generic_smp_call_function_interrupt();

		if (ops & (1 << IPI_IRQ_WORK))
			irq_work_run();

		BUG_ON((ops >> IPI_MAX) != 0);
	}

	return IRQ_HANDLED;
}

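/*
 * The interrupt controller driver supplies the low-level IPI trigger
 * and its per-CPU interrupt number via set_send_ipi(); only the first
 * registration is honoured.
 */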
static void (*send_arch_ipi)(const struct cpumask *mask);

static int ipi_irq;
void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq)
{
	if (send_arch_ipi)
		return;

	send_arch_ipi = func;
	ipi_irq = irq;
}

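/*
 * Mark the requested operation pending on every target CPU, then order
 * those stores before triggering the hardware IPI so the receivers are
 * guaranteed to see their bits.
 */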
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

	for_each_cpu(i, to_whom)
		set_bit(operation, &per_cpu_ptr(&ipi_data, i)->bits);

	smp_mb();
	send_arch_ipi(to_whom);
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

static void ipi_stop(void *unused)
{
	while (1);
}

void smp_send_stop(void)
{
	on_each_cpu(ipi_stop, NULL, 1);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

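/* Pending irq_work is run by sending an IPI to the current CPU. */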
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	send_ipi_message(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

static int ipi_dummy_dev;

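/*
 * Request the per-CPU IPI interrupt registered by the interrupt
 * controller driver and enable it on the boot CPU.  Secondary CPUs
 * enable their copy in csky_start_secondary().
 */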
void __init setup_smp_ipi(void)
{
	int rc;

	if (ipi_irq == 0)
		return;

	rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
				&ipi_dummy_dev);
	if (rc)
		panic("%s IRQ request failed\n", __func__);

	enable_percpu_irq(ipi_irq, 0);
}

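/*
 * Walk the devicetree cpu nodes and mark every available CPU as
 * possible and present, using the "reg" property as the CPU id.
 */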
void __init setup_smp(void)
{
	struct device_node *node = NULL;
	int cpu;

	for_each_of_cpu_node(node) {
		if (!of_device_is_available(node))
			continue;

		if (of_property_read_u32(node, "reg", &cpu))
			continue;

		if (cpu >= NR_CPUS)
			continue;

		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
	}
}

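/*
 * Boot state handed to a secondary CPU: __cpu_up() fills these in and
 * flushes them to memory before releasing the CPU from reset; the
 * secondary picks them up in its early boot path and in
 * csky_start_secondary().
 */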
extern void _start_smp_secondary(void);

volatile unsigned int secondary_hint;
volatile unsigned int secondary_hint2;
volatile unsigned int secondary_ccr;
volatile unsigned int secondary_stack;

unsigned long secondary_msa1;

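/*
 * Bring up one secondary CPU: stage its idle-thread stack and the boot
 * CPU's control-register values, flush them to memory, then either
 * kick the CPU with an IPI (if it is already out of reset) or release
 * it via the SMP reset control register.
 */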
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	unsigned long mask = 1 << cpu;

	secondary_stack =
		(unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
	secondary_hint = mfcr("cr31");
	secondary_hint2 = mfcr("cr<21, 1>");
	secondary_ccr = mfcr("cr18");
	secondary_msa1 = read_mmu_msa1();

	/*
	 * Because the other CPUs are still held in reset, flush these
	 * values from the cache out to memory so the secondary CPU can
	 * read them in csky_start_secondary().
	 */
	mtcr("cr17", 0x22);

	if (mask & mfcr("cr<29, 0>")) {
		send_arch_ipi(cpumask_of(cpu));
	} else {
		/* Enable the cpu in the SMP reset control register */
		mask |= mfcr("cr<29, 0>");
		mtcr("cr<29, 0>", mask);
	}

	/* Wait for the cpu to come online */
	while (!cpu_online(cpu));

	secondary_stack = 0;

	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

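/*
 * C entry point for a secondary CPU: restore the control registers
 * staged by __cpu_up(), set up the MMU and the per-CPU IPI, attach to
 * init_mm, and then enter the idle loop.
 */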
void csky_start_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	mtcr("cr31", secondary_hint);
	mtcr("cr<21, 1>", secondary_hint2);
	mtcr("cr18", secondary_ccr);

	mtcr("vbr", vec_base);

	flush_tlb_all();
	write_mmu_pagemask(0);
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
	TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);

#ifdef CONFIG_CPU_HAS_FPU
	init_fpu();
#endif

	enable_percpu_irq(ipi_irq, 0);

	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("CPU%u Online: %s...\n", cpu, __func__);

	local_irq_enable();
	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

#ifdef CONFIG_HOTPLUG_CPU
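/*
 * Take this CPU offline: clear it from the online mask, migrate its
 * interrupts to other CPUs, and drop it from all mm cpumasks.
 */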
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	set_cpu_online(cpu, false);

	irq_migrate_all_off_this_cpu();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

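/* Wait for the dying CPU to report its death, complaining on timeout. */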
void __cpu_die(unsigned int cpu)
{
	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: shutdown failed\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);
}

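/*
 * Idle loop for an offlined CPU: report death, then spin in the idle
 * routine until __cpu_up() publishes a new secondary_stack, at which
 * point jump back into the secondary boot path on that stack.
 */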
void arch_cpu_idle_dead(void)
{
	idle_task_exit();

	cpu_report_death();

	while (!secondary_stack)
		arch_cpu_idle();

	local_irq_disable();

	asm volatile(
		"mov sp, %0\n"
		"mov r8, %0\n"
		"jmpi csky_start_secondary"
		:
		: "r" (secondary_stack));
}
#endif