Commit | Line | Data |
---|---|---|
0ee958e1 PB |
1 | /* |
2 | * Copyright (C) 2013 Imagination Technologies | |
3 | * Author: Paul Burton <paul.burton@imgtec.com> | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms of the GNU General Public License as published by the | |
7 | * Free Software Foundation; either version 2 of the License, or (at your | |
8 | * option) any later version. | |
9 | */ | |
10 | ||
11 | #include <linux/io.h> | |
12 | #include <linux/sched.h> | |
13 | #include <linux/slab.h> | |
14 | #include <linux/smp.h> | |
15 | #include <linux/types.h> | |
16 | ||
0fc0708a | 17 | #include <asm/bcache.h> |
0ee958e1 PB |
18 | #include <asm/gic.h> |
19 | #include <asm/mips-cm.h> | |
20 | #include <asm/mips-cpc.h> | |
21 | #include <asm/mips_mt.h> | |
22 | #include <asm/mipsregs.h> | |
1d8f1f5a | 23 | #include <asm/pm-cps.h> |
0fc0708a | 24 | #include <asm/r4kcache.h> |
0ee958e1 PB |
25 | #include <asm/smp-cps.h> |
26 | #include <asm/time.h> | |
27 | #include <asm/uasm.h> | |
28 | ||
/* Bitmap of cores currently powered up, indexed by core number. */
static DECLARE_BITMAP(core_power, NR_CPUS);

/* Per-core boot configuration, allocated in cps_prepare_cpus(). */
struct core_boot_config *mips_cps_core_bootcfg;
0ee958e1 | 32 | |
/*
 * Return the number of VPEs within the given core.
 *
 * Without MT SMP support (or MT hardware) each core is a single CPU, so
 * report 1. Otherwise select the core via the CM's core-other window and
 * read the PVPE field of its CONFIG GCR, which holds the highest VPE
 * number (hence the +1).
 */
static unsigned core_vpe_count(unsigned core)
{
	unsigned cfg;

	if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
		return 1;

	/* Point the core-other register window at the target core */
	write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
	cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
	return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
}
44 | ||
/*
 * Detect the core/VPE topology of the system and record it: per-CPU core &
 * VPE IDs in cpu_data, the CPU present/possible maps and the CPU number
 * mappings (one Linux CPU per VPE). Then switch to a coherent CCA and bring
 * core 0 — the core we are running on — into the coherent domain.
 */
static void __init cps_smp_setup(void)
{
	unsigned int ncores, nvpes, core_vpes;
	int c, v;

	/* Detect & record VPE topology */
	ncores = mips_cm_numcores();
	pr_info("VPE topology ");
	for (c = nvpes = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		pr_cont("%c%u", c ? ',' : '{', core_vpes);

		/* Use the number of VPEs in core 0 for smp_num_siblings */
		if (!c)
			smp_num_siblings = core_vpes;

		/* Clamp so we never write past cpu_data[NR_CPUS - 1] */
		for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
			cpu_data[nvpes + v].core = c;
#ifdef CONFIG_MIPS_MT_SMP
			cpu_data[nvpes + v].vpe_id = v;
#endif
		}

		nvpes += core_vpes;
	}
	pr_cont("} total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, true);
		set_cpu_present(v, true);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);
}
92 | ||
/*
 * Prepare for booting secondary CPUs:
 *
 *  - Check whether the kseg0 CCA is coherent; if not, multi-core SMP is
 *    unusable and all cores other than 0 are marked not present.
 *  - Patch the start of mips_cps_core_entry so newly booting cores receive
 *    the CM base address & the CCA, then write the patched code back to
 *    memory so it is visible before any core fetches it.
 *  - Allocate the per-core and per-VPE boot configuration structures and
 *    mark the current VPE as already booted.
 *
 * On allocation failure SMP is effectively disabled by declaring every CPU
 * except CPU0 not present.
 */
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned ncores, core_vpes, c, cca;
	bool cca_unsuitable;
	u32 *entry_code;

	mips_mt_set_cpuoptions();

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	ncores = mips_cm_numcores();
	if (cca_unsuitable && ncores > 1) {
		pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
			cca);

		/* Keep only core 0's CPUs (VPEs) present */
		for_each_present_cpu(c) {
			if (cpu_data[c].core)
				set_cpu_present(c, false);
		}
	}

	/*
	 * Patch the start of mips_cps_core_entry to provide:
	 *
	 * v0 = CM base address
	 * s0 = kseg0 CCA
	 *
	 * (uasm register numbers 3 & 16 are v0 & s0 respectively)
	 */
	entry_code = (u32 *)&mips_cps_core_entry;
	UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
	uasm_i_addiu(&entry_code, 16, 0, cca);
	/*
	 * Write the patched instructions back through the dcache & the
	 * bcache (L2), so a core fetching the (uncached) reset vector sees
	 * the updated code.
	 */
	blast_dcache_range((unsigned long)&mips_cps_core_entry,
			   (unsigned long)entry_code);
	bc_wback_inv((unsigned long)&mips_cps_core_entry,
		     (void *)entry_code - (void *)&mips_cps_core_entry);
	__sync();

	/* Allocate core boot configuration structs */
	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg) {
		pr_err("Failed to allocate boot config for %u cores\n", ncores);
		goto err_out;
	}

	/* Allocate VPE boot configuration structs */
	for (c = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config) {
			pr_err("Failed to allocate %u VPE boot configs\n",
			       core_vpes);
			goto err_out;
		}
	}

	/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_core_bootcfg) {
		for (c = 0; c < ncores; c++)
			kfree(mips_cps_core_bootcfg[c].vpe_config);
		kfree(mips_cps_core_bootcfg);
		mips_cps_core_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}
184 | ||
/*
 * Power up & start execution of the given (currently powered down) core,
 * directing it to begin execution at mips_cps_core_entry. When a CPC is
 * present the core is started by issuing a reset command through it;
 * otherwise it is released from reset via the CM's GCRs. The core is
 * recorded as powered up in core_power on the way out.
 */
static void boot_core(unsigned core)
{
	u32 access;

	/* Select the appropriate core */
	write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);

	/* Set its reset vector */
	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Ensure the core can access the GCRs */
	access = read_gcr_access();
	access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
	write_gcr_access(access);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);
		write_cpc_co_cmd(CPC_Cx_CMD_RESET);
		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}
216 | ||
/*
 * SMP-call callback: runs on a CPU within the target core so that
 * mips_cps_boot_vpes() starts the pending VPEs of that core.
 */
static void remote_vpe_boot(void *dummy)
{
	mips_cps_boot_vpes();
}
221 | ||
222 | static void cps_boot_secondary(int cpu, struct task_struct *idle) | |
223 | { | |
245a7868 PB |
224 | unsigned core = cpu_data[cpu].core; |
225 | unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); | |
226 | struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; | |
227 | struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id]; | |
0ee958e1 PB |
228 | unsigned int remote; |
229 | int err; | |
230 | ||
245a7868 PB |
231 | vpe_cfg->pc = (unsigned long)&smp_bootstrap; |
232 | vpe_cfg->sp = __KSTK_TOS(idle); | |
233 | vpe_cfg->gp = (unsigned long)task_thread_info(idle); | |
0ee958e1 | 234 | |
245a7868 PB |
235 | atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask); |
236 | ||
1d8f1f5a | 237 | preempt_disable(); |
0ee958e1 | 238 | |
245a7868 | 239 | if (!test_bit(core, core_power)) { |
0ee958e1 | 240 | /* Boot a VPE on a powered down core */ |
245a7868 | 241 | boot_core(core); |
1d8f1f5a | 242 | goto out; |
0ee958e1 PB |
243 | } |
244 | ||
245a7868 | 245 | if (core != current_cpu_data.core) { |
0ee958e1 PB |
246 | /* Boot a VPE on another powered up core */ |
247 | for (remote = 0; remote < NR_CPUS; remote++) { | |
245a7868 | 248 | if (cpu_data[remote].core != core) |
0ee958e1 PB |
249 | continue; |
250 | if (cpu_online(remote)) | |
251 | break; | |
252 | } | |
253 | BUG_ON(remote >= NR_CPUS); | |
254 | ||
245a7868 PB |
255 | err = smp_call_function_single(remote, remote_vpe_boot, |
256 | NULL, 1); | |
0ee958e1 PB |
257 | if (err) |
258 | panic("Failed to call remote CPU\n"); | |
1d8f1f5a | 259 | goto out; |
0ee958e1 PB |
260 | } |
261 | ||
262 | BUG_ON(!cpu_has_mipsmt); | |
263 | ||
264 | /* Boot a VPE on this core */ | |
245a7868 | 265 | mips_cps_boot_vpes(); |
1d8f1f5a PB |
266 | out: |
267 | preempt_enable(); | |
0ee958e1 PB |
268 | } |
269 | ||
270 | static void cps_init_secondary(void) | |
271 | { | |
272 | /* Disable MT - we only want to run 1 TC per VPE */ | |
273 | if (cpu_has_mipsmt) | |
274 | dmt(); | |
275 | ||
ff1e29ad AB |
276 | change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 | |
277 | STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7); | |
0ee958e1 PB |
278 | } |
279 | ||
280 | static void cps_smp_finish(void) | |
281 | { | |
282 | write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ)); | |
283 | ||
284 | #ifdef CONFIG_MIPS_MT_FPAFF | |
285 | /* If we have an FPU, enroll ourselves in the FPU-full mask */ | |
286 | if (cpu_has_fpu) | |
287 | cpu_set(smp_processor_id(), mt_fpu_cpumask); | |
288 | #endif /* CONFIG_MIPS_MT_FPAFF */ | |
289 | ||
290 | local_irq_enable(); | |
291 | } | |
292 | ||
1d8f1f5a PB |
293 | #ifdef CONFIG_HOTPLUG_CPU |
294 | ||
/*
 * Prepare the current CPU for offlining.
 *
 * CPU0 can never be offlined (-EBUSY), and the power gated PM state must be
 * available so the core can actually be taken down (-EINVAL otherwise).
 * Clears this VPE's bit from its core's vpe_mask — with a barrier so the
 * clearing is visible before the CPU is marked offline — and removes the
 * CPU from the online & callin maps.
 */
static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct core_boot_config *core_cfg;

	if (!cpu)
		return -EBUSY;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	cpu_clear(cpu, cpu_callin_map);

	return 0;
}
314 | ||
/* Signalled by a dying CPU once it has chosen how it will go offline. */
static DECLARE_COMPLETION(cpu_death_chosen);
/* An online sibling VPE within the dying CPU's core (halt case only). */
static unsigned cpu_death_sibling;
/* The way the dying CPU will go offline, chosen in play_dead(). */
static enum {
	CPU_DEATH_HALT,		/* halt this TC, leave the core running */
	CPU_DEATH_POWER,	/* power gate the whole core */
} cpu_death;
321 | ||
/*
 * Executed by a dying CPU from the idle loop. If MT is in use and another
 * VPE within this core remains online, only this TC is halted; otherwise
 * the whole core is power gated. The choice is published through
 * cpu_death/cpu_death_sibling and signalled via cpu_death_chosen so that
 * cps_cpu_die() can wait for the CPU to actually go away. Never returns.
 */
void play_dead(void)
{
	unsigned cpu, core;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	cpu_death = CPU_DEATH_POWER;

	if (cpu_has_mipsmt) {
		core = cpu_data[cpu].core;

		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (cpu_data[cpu_death_sibling].core != core)
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	/* This CPU has chosen its way out */
	complete(&cpu_death_chosen);

	if (cpu_death == CPU_DEATH_HALT) {
		/* Halt this TC */
		write_c0_tchalt(TCHALT_H);
		instruction_hazard();
	} else {
		/* Power down the core */
		cps_pm_enter_state(CPS_PM_POWER_GATED);
	}

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}
363 | ||
364 | static void wait_for_sibling_halt(void *ptr_cpu) | |
365 | { | |
366 | unsigned cpu = (unsigned)ptr_cpu; | |
c90e49f2 | 367 | unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); |
1d8f1f5a PB |
368 | unsigned halted; |
369 | unsigned long flags; | |
370 | ||
371 | do { | |
372 | local_irq_save(flags); | |
373 | settc(vpe_id); | |
374 | halted = read_tc_c0_tchalt(); | |
375 | local_irq_restore(flags); | |
376 | } while (!(halted & TCHALT_H)); | |
377 | } | |
378 | ||
379 | static void cps_cpu_die(unsigned int cpu) | |
380 | { | |
381 | unsigned core = cpu_data[cpu].core; | |
382 | unsigned stat; | |
383 | int err; | |
384 | ||
385 | /* Wait for the cpu to choose its way out */ | |
386 | if (!wait_for_completion_timeout(&cpu_death_chosen, | |
387 | msecs_to_jiffies(5000))) { | |
388 | pr_err("CPU%u: didn't offline\n", cpu); | |
389 | return; | |
390 | } | |
391 | ||
392 | /* | |
393 | * Now wait for the CPU to actually offline. Without doing this that | |
394 | * offlining may race with one or more of: | |
395 | * | |
396 | * - Onlining the CPU again. | |
397 | * - Powering down the core if another VPE within it is offlined. | |
398 | * - A sibling VPE entering a non-coherent state. | |
399 | * | |
400 | * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing | |
401 | * with which we could race, so do nothing. | |
402 | */ | |
403 | if (cpu_death == CPU_DEATH_POWER) { | |
404 | /* | |
405 | * Wait for the core to enter a powered down or clock gated | |
406 | * state, the latter happening when a JTAG probe is connected | |
407 | * in which case the CPC will refuse to power down the core. | |
408 | */ | |
409 | do { | |
410 | mips_cpc_lock_other(core); | |
411 | stat = read_cpc_co_stat_conf(); | |
412 | stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK; | |
413 | mips_cpc_unlock_other(); | |
414 | } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 && | |
415 | stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 && | |
416 | stat != CPC_Cx_STAT_CONF_SEQSTATE_U2); | |
417 | ||
418 | /* Indicate the core is powered off */ | |
419 | bitmap_clear(core_power, core, 1); | |
420 | } else if (cpu_has_mipsmt) { | |
421 | /* | |
422 | * Have a CPU with access to the offlined CPUs registers wait | |
423 | * for its TC to halt. | |
424 | */ | |
425 | err = smp_call_function_single(cpu_death_sibling, | |
426 | wait_for_sibling_halt, | |
427 | (void *)cpu, 1); | |
428 | if (err) | |
429 | panic("Failed to call remote sibling CPU\n"); | |
430 | } | |
431 | } | |
432 | ||
433 | #endif /* CONFIG_HOTPLUG_CPU */ | |
434 | ||
0ee958e1 PB |
/*
 * SMP operations for the Coherent Processing System, registered with the
 * MIPS SMP core by register_cps_smp_ops(). IPIs are delegated to the GIC
 * helpers; hotplug callbacks are provided only when CONFIG_HOTPLUG_CPU.
 */
static struct plat_smp_ops cps_smp_ops = {
	.smp_setup = cps_smp_setup,
	.prepare_cpus = cps_prepare_cpus,
	.boot_secondary = cps_boot_secondary,
	.init_secondary = cps_init_secondary,
	.smp_finish = cps_smp_finish,
	.send_ipi_single = gic_send_ipi_single,
	.send_ipi_mask = gic_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable = cps_cpu_disable,
	.cpu_die = cps_cpu_die,
#endif
};
448 | ||
68c1232f PB |
449 | bool mips_cps_smp_in_use(void) |
450 | { | |
451 | extern struct plat_smp_ops *mp_ops; | |
452 | return mp_ops == &cps_smp_ops; | |
453 | } | |
454 | ||
0ee958e1 PB |
/*
 * Register the CPS SMP ops as the platform SMP implementation.
 *
 * Requires a Coherence Manager (and only once one is known present may the
 * GIC status GCR be read) plus a GIC for inter-processor interrupts.
 * Returns 0 on success or -ENODEV when either requirement is missing.
 */
int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);
	return 0;
}