/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>

#include <asm/bcache.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>

static bool threads_disabled;
static DECLARE_BITMAP(core_power, NR_CPUS);

struct core_boot_config *mips_cps_core_bootcfg;

static int __init setup_nothreads(char *s)
{
        threads_disabled = true;
        return 0;
}
early_param("nothreads", setup_nothreads);
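
/*
 * Passing "nothreads" on the kernel command line sets threads_disabled,
 * which makes core_vpe_count() below report a single VPE per core so
 * that only one CPU is brought up on each core.
 */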

static unsigned core_vpe_count(unsigned core)
{
        unsigned cfg;

        if (threads_disabled)
                return 1;

        if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
                && (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
                return 1;

        mips_cm_lock_other(core, 0);
        cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
        mips_cm_unlock_other();
        return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
}
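
/*
 * Worked example (illustrative): the PVPE field of GCR_Cx_CONFIG encodes
 * the number of VPEs in the core minus one, so a raw field value of 1
 * means the core implements 2 VPEs, hence the "+ 1" above.
 */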

static void __init cps_smp_setup(void)
{
        unsigned int ncores, nvpes, core_vpes;
        unsigned long core_entry;
        int c, v;

        /* Detect & record VPE topology */
        ncores = mips_cm_numcores();
        pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
        for (c = nvpes = 0; c < ncores; c++) {
                core_vpes = core_vpe_count(c);
                pr_cont("%c%u", c ? ',' : '{', core_vpes);

                /* Use the number of VPEs in core 0 for smp_num_siblings */
                if (!c)
                        smp_num_siblings = core_vpes;

                for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
                        cpu_data[nvpes + v].core = c;
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
                        cpu_data[nvpes + v].vpe_id = v;
#endif
                }

                nvpes += core_vpes;
        }
        pr_cont("} total %u\n", nvpes);

        /* Indicate present CPUs (CPU being synonymous with VPE) */
        for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
                set_cpu_possible(v, true);
                set_cpu_present(v, true);
                __cpu_number_map[v] = v;
                __cpu_logical_map[v] = v;
        }

        /* Set a coherent default CCA (CWB) */
        change_c0_config(CONF_CM_CMASK, 0x5);

        /* Core 0 is powered up (we're running on it) */
        bitmap_set(core_power, 0, 1);

        /* Initialise core 0 */
        mips_cps_core_init();

        /* Make core 0 coherent with everything */
        write_gcr_cl_coherence(0xff);

        if (mips_cm_revision() >= CM_REV_CM3) {
                core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
                write_gcr_bev_base(core_entry);
        }

#ifdef CONFIG_MIPS_MT_FPAFF
        /* If we have an FPU, enroll ourselves in the FPU-full mask */
        if (cpu_has_fpu)
                cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}
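
/*
 * Example output (illustrative): on a system with two cores of two VPEs
 * each, the topology loop above logs "VPE topology {2,2} total 4"
 * ("VP topology" on a MIPSr6 system).
 */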

static void __init cps_prepare_cpus(unsigned int max_cpus)
{
        unsigned ncores, core_vpes, c, cca;
        bool cca_unsuitable;
        u32 *entry_code;

        mips_mt_set_cpuoptions();

        /* Detect whether the CCA is unsuited to multi-core SMP */
        cca = read_c0_config() & CONF_CM_CMASK;
        switch (cca) {
        case 0x4: /* CWBE */
        case 0x5: /* CWB */
                /* The CCA is coherent, multi-core is fine */
                cca_unsuitable = false;
                break;

        default:
                /* CCA is not coherent, multi-core is not usable */
                cca_unsuitable = true;
        }

        /* Warn the user if the CCA prevents multi-core */
        ncores = mips_cm_numcores();
        if (cca_unsuitable && ncores > 1) {
                pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
                        cca);

                for_each_present_cpu(c) {
                        if (cpu_data[c].core)
                                set_cpu_present(c, false);
                }
        }

        /*
         * Patch the start of mips_cps_core_entry to provide:
         *
         * s0 = kseg0 CCA
         */
        entry_code = (u32 *)&mips_cps_core_entry;
        uasm_i_addiu(&entry_code, 16, 0, cca);
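        /*
         * Register 16 is $s0 and register 0 is $zero, so the call above
         * emits "addiu $s0, $zero, cca", loading the CCA value into s0
         * for the core entry code.
         */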
        blast_dcache_range((unsigned long)&mips_cps_core_entry,
                           (unsigned long)entry_code);
        bc_wback_inv((unsigned long)&mips_cps_core_entry,
                     (void *)entry_code - (void *)&mips_cps_core_entry);
        __sync();

        /* Allocate core boot configuration structs */
        mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
                                        GFP_KERNEL);
        if (!mips_cps_core_bootcfg) {
                pr_err("Failed to allocate boot config for %u cores\n", ncores);
                goto err_out;
        }

        /* Allocate VPE boot configuration structs */
        for (c = 0; c < ncores; c++) {
                core_vpes = core_vpe_count(c);
                mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
                                sizeof(*mips_cps_core_bootcfg[c].vpe_config),
                                GFP_KERNEL);
                if (!mips_cps_core_bootcfg[c].vpe_config) {
                        pr_err("Failed to allocate %u VPE boot configs\n",
                               core_vpes);
                        goto err_out;
                }
        }

        /* Mark this CPU as booted */
        atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
                   1 << cpu_vpe_id(&current_cpu_data));

        return;
err_out:
        /* Clean up allocations */
        if (mips_cps_core_bootcfg) {
                for (c = 0; c < ncores; c++)
                        kfree(mips_cps_core_bootcfg[c].vpe_config);
                kfree(mips_cps_core_bootcfg);
                mips_cps_core_bootcfg = NULL;
        }

        /* Effectively disable SMP by declaring CPUs not present */
        for_each_possible_cpu(c) {
                if (c == 0)
                        continue;
                set_cpu_present(c, false);
        }
}

static void boot_core(unsigned core)
{
        u32 access, stat, seq_state;
        unsigned timeout;

        /* Select the appropriate core */
        mips_cm_lock_other(core, 0);

        /* Set its reset vector */
        write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));

        /* Ensure its coherency is disabled */
        write_gcr_co_coherence(0);

        /* Start it with the legacy memory map and exception base */
        write_gcr_co_reset_ext_base(CM_GCR_RESET_EXT_BASE_UEB);

        /* Ensure the core can access the GCRs */
        access = read_gcr_access();
        access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
        write_gcr_access(access);

        if (mips_cpc_present()) {
                /* Reset the core */
                mips_cpc_lock_other(core);

                if (mips_cm_revision() >= CM_REV_CM3) {
                        /* Run VP0 following the reset */
                        write_cpc_co_vp_run(0x1);

                        /*
                         * Ensure that the VP_RUN register is written before
                         * the core leaves reset.
                         */
                        wmb();
                }

                write_cpc_co_cmd(CPC_Cx_CMD_RESET);

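                /*
                 * Poll for the core to reach coherent execution, waiting
                 * up to ~1 second (100 iterations of 10ms) before starting
                 * to warn.
                 */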
                timeout = 100;
                while (true) {
                        stat = read_cpc_co_stat_conf();
                        seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE_MSK;

                        /* U6 == coherent execution, ie. the core is up */
                        if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
                                break;

                        /* Delay a little while before we start warning */
                        if (timeout) {
                                timeout--;
                                mdelay(10);
                                continue;
                        }

                        pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
                                core, stat);
                        mdelay(1000);
                }

                mips_cpc_unlock_other();
        } else {
                /* Take the core out of reset */
                write_gcr_co_reset_release(0);
        }

        mips_cm_unlock_other();

        /* The core is now powered up */
        bitmap_set(core_power, core, 1);
}

static void remote_vpe_boot(void *dummy)
{
        unsigned core = current_cpu_data.core;
        struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];

        mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}

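/*
 * Bring up the VPE backing a given CPU. Three cases are handled below:
 * the target core is powered down (boot the whole core), the target VPE
 * lives on another powered-up core (ask an online CPU on that core to
 * start it), or the target VPE is on this core (start it directly).
 */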
static void cps_boot_secondary(int cpu, struct task_struct *idle)
{
        unsigned core = cpu_data[cpu].core;
        unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
        struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
        struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
        unsigned long core_entry;
        unsigned int remote;
        int err;

        vpe_cfg->pc = (unsigned long)&smp_bootstrap;
        vpe_cfg->sp = __KSTK_TOS(idle);
        vpe_cfg->gp = (unsigned long)task_thread_info(idle);

        atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

        preempt_disable();

        if (!test_bit(core, core_power)) {
                /* Boot a VPE on a powered down core */
                boot_core(core);
                goto out;
        }

        if (cpu_has_vp) {
                mips_cm_lock_other(core, vpe_id);
                core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
                write_gcr_co_reset_base(core_entry);
                mips_cm_unlock_other();
        }

        if (core != current_cpu_data.core) {
                /* Boot a VPE on another powered up core */
                for (remote = 0; remote < NR_CPUS; remote++) {
                        if (cpu_data[remote].core != core)
                                continue;
                        if (cpu_online(remote))
                                break;
                }
                BUG_ON(remote >= NR_CPUS);

                err = smp_call_function_single(remote, remote_vpe_boot,
                                               NULL, 1);
                if (err)
                        panic("Failed to call remote CPU\n");
                goto out;
        }

        BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

        /* Boot a VPE on this core */
        mips_cps_boot_vpes(core_cfg, vpe_id);
out:
        preempt_enable();
}

static void cps_init_secondary(void)
{
        /* Disable MT - we only want to run 1 TC per VPE */
        if (cpu_has_mipsmt)
                dmt();

        if (mips_cm_revision() >= CM_REV_CM3) {
                unsigned ident = gic_read_local_vp_id();

                /*
                 * Ensure that our calculation of the VP ID matches up with
                 * what the GIC reports, otherwise we'll have configured
                 * interrupts incorrectly.
                 */
                BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
        }

        if (cpu_has_veic)
                clear_c0_status(ST0_IM);
        else
                change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
                                         STATUSF_IP4 | STATUSF_IP5 |
                                         STATUSF_IP6 | STATUSF_IP7);
}

static void cps_smp_finish(void)
{
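        /*
         * Arm the first local timer interrupt roughly 8 ticks from now;
         * mips_hpt_frequency / HZ is the number of counter cycles per tick.
         */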
        write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
        /* If we have an FPU, enroll ourselves in the FPU-full mask */
        if (cpu_has_fpu)
                cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

        local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

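/*
 * CPU0 can never be offlined (-EBUSY below), and offlining any other CPU
 * requires the CPS power-gated idle state to be available.
 */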
static int cps_cpu_disable(void)
{
        unsigned cpu = smp_processor_id();
        struct core_boot_config *core_cfg;

        if (!cpu)
                return -EBUSY;

        if (!cps_pm_support_state(CPS_PM_POWER_GATED))
                return -EINVAL;

        core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
        atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
        smp_mb__after_atomic();
        set_cpu_online(cpu, false);
        cpumask_clear_cpu(cpu, &cpu_callin_map);

        return 0;
}

static DECLARE_COMPLETION(cpu_death_chosen);
static unsigned cpu_death_sibling;
static enum {
        CPU_DEATH_HALT,
        CPU_DEATH_POWER,
} cpu_death;

void play_dead(void)
{
        unsigned cpu, core;

        local_irq_disable();
        idle_task_exit();
        cpu = smp_processor_id();
        cpu_death = CPU_DEATH_POWER;

        if (cpu_has_mipsmt) {
                core = cpu_data[cpu].core;

                /* Look for another online VPE within the core */
                for_each_online_cpu(cpu_death_sibling) {
                        if (cpu_data[cpu_death_sibling].core != core)
                                continue;

                        /*
                         * There is an online VPE within the core. Just halt
                         * this TC and leave the core alone.
                         */
                        cpu_death = CPU_DEATH_HALT;
                        break;
                }
        }

        /* This CPU has chosen its way out */
        complete(&cpu_death_chosen);

        if (cpu_death == CPU_DEATH_HALT) {
                /* Halt this TC */
                write_c0_tchalt(TCHALT_H);
                instruction_hazard();
        } else {
                /* Power down the core */
                cps_pm_enter_state(CPS_PM_POWER_GATED);
        }

        /* This should never be reached */
        panic("Failed to offline CPU %u", cpu);
}

static void wait_for_sibling_halt(void *ptr_cpu)
{
        unsigned cpu = (unsigned long)ptr_cpu;
        unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
        unsigned halted;
        unsigned long flags;

        do {
                local_irq_save(flags);
                settc(vpe_id);
                halted = read_tc_c0_tchalt();
                local_irq_restore(flags);
        } while (!(halted & TCHALT_H));
}

static void cps_cpu_die(unsigned int cpu)
{
        unsigned core = cpu_data[cpu].core;
        unsigned stat;
        int err;

        /* Wait for the cpu to choose its way out */
        if (!wait_for_completion_timeout(&cpu_death_chosen,
                                         msecs_to_jiffies(5000))) {
                pr_err("CPU%u: didn't offline\n", cpu);
                return;
        }

        /*
         * Now wait for the CPU to actually offline. Without doing this, the
         * offlining may race with one or more of:
         *
         * - Onlining the CPU again.
         * - Powering down the core if another VPE within it is offlined.
         * - A sibling VPE entering a non-coherent state.
         *
         * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
         * with which we could race, so do nothing.
         */
        if (cpu_death == CPU_DEATH_POWER) {
                /*
                 * Wait for the core to enter a powered down or clock gated
                 * state, the latter happening when a JTAG probe is connected
                 * in which case the CPC will refuse to power down the core.
                 */
                do {
                        mips_cpc_lock_other(core);
                        stat = read_cpc_co_stat_conf();
                        stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
                        mips_cpc_unlock_other();
                } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
                         stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
                         stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);

                /* Indicate the core is powered off */
                bitmap_clear(core_power, core, 1);
        } else if (cpu_has_mipsmt) {
                /*
                 * Have a CPU with access to the offlined CPU's registers wait
                 * for its TC to halt.
                 */
                err = smp_call_function_single(cpu_death_sibling,
                                               wait_for_sibling_halt,
                                               (void *)(unsigned long)cpu, 1);
                if (err)
                        panic("Failed to call remote sibling CPU\n");
        }
}

#endif /* CONFIG_HOTPLUG_CPU */

static struct plat_smp_ops cps_smp_ops = {
        .smp_setup = cps_smp_setup,
        .prepare_cpus = cps_prepare_cpus,
        .boot_secondary = cps_boot_secondary,
        .init_secondary = cps_init_secondary,
        .smp_finish = cps_smp_finish,
        .send_ipi_single = mips_smp_send_ipi_single,
        .send_ipi_mask = mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
        .cpu_disable = cps_cpu_disable,
        .cpu_die = cps_cpu_die,
#endif
};

bool mips_cps_smp_in_use(void)
{
        extern struct plat_smp_ops *mp_ops;
        return mp_ops == &cps_smp_ops;
}

int register_cps_smp_ops(void)
{
        if (!mips_cm_present()) {
                pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
                return -ENODEV;
        }

        /* check we have a GIC - we need one for IPIs */
        if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
                pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
                return -ENODEV;
        }

        register_smp_ops(&cps_smp_ops);
        return 0;
}
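
/*
 * Usage sketch (illustrative; everything here other than
 * register_cps_smp_ops() is assumed platform code, not part of this
 * file): platform setup typically tries CPS SMP first and falls back to
 * another SMP implementation when no CM or GIC is present, e.g.:
 *
 *	void __init prom_init(void)
 *	{
 *		if (register_cps_smp_ops())
 *			register_up_smp_ops();
 *	}
 */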