/*
 * Copyright (C) 2014 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/idle.h>
#include <asm/mips-cps.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/pm-cps.h>
#include <asm/smp-cps.h>
#include <asm/uasm.h>

/*
 * cps_nc_entry_fn - type of a generated non-coherent state entry function
 * @online: the count of online coupled VPEs
 * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
 *
 * The code entering & exiting non-coherent states is generated at runtime
 * using uasm, in order to ensure that the compiler cannot insert a stray
 * memory access at an unfortunate time and to allow the generation of optimal
 * core-specific code particularly for cache routines. If coupled_coherence
 * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state,
 * returns the number of VPEs that were in the wait state at the point this
 * VPE left it. Returns garbage if coupled_coherence is zero or this is not
 * the entry function for CPS_PM_NC_WAIT.
 */
typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);

/*
 * The entry point of the generated non-coherent idle state entry/exit
 * functions. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
				  nc_asm_enter);

/* Bitmap indicating which states are supported by the system */
static DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);

/*
 * Indicates the number of coupled VPEs ready to operate in a non-coherent
 * state. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);

/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);

/*
 * Used to synchronize entry to deep idle states. Actually per-core rather
 * than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);

/* Saved CPU state across the CPS_PM_POWER_GATED state */
DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);

/* A somewhat arbitrary number of labels & relocs for uasm */
static struct uasm_label labels[32];
static struct uasm_reloc relocs[32];

enum mips_reg {
	zero, at, v0, v1, a0, a1, a2, a3,
	t0, t1, t2, t3, t4, t5, t6, t7,
	s0, s1, s2, s3, s4, s5, s6, s7,
	t8, t9, k0, k1, gp, sp, fp, ra,
};

bool cps_pm_support_state(enum cps_pm_state state)
{
	return test_bit(state, state_support);
}

static void coupled_barrier(atomic_t *a, unsigned online)
{
	/*
	 * This function is effectively the same as
	 * cpuidle_coupled_parallel_barrier, which can't be used here since
	 * there's no cpuidle device.
	 */

	if (!coupled_coherence)
		return;

	smp_mb__before_atomic();
	atomic_inc(a);

	while (atomic_read(a) < online)
		cpu_relax();

	if (atomic_inc_return(a) == online * 2) {
		atomic_set(a, 0);
		return;
	}

	while (atomic_read(a) > online)
		cpu_relax();
}

int cps_pm_enter_state(enum cps_pm_state state)
{
	unsigned cpu = smp_processor_id();
	unsigned core = cpu_core(&current_cpu_data);
	unsigned online, left;
	cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
	u32 *core_ready_count, *nc_core_ready_count;
	void *nc_addr;
	cps_nc_entry_fn entry;
	struct core_boot_config *core_cfg;
	struct vpe_boot_config *vpe_cfg;

	/* Check that there is an entry function for this state */
	entry = per_cpu(nc_asm_enter, core)[state];
	if (!entry)
		return -EINVAL;

	/* Calculate which coupled CPUs (VPEs) are online */
#if defined(CONFIG_MIPS_MT) || defined(CONFIG_CPU_MIPSR6)
	if (cpu_online(cpu)) {
		cpumask_and(coupled_mask, cpu_online_mask,
			    &cpu_sibling_map[cpu]);
		online = cpumask_weight(coupled_mask);
		cpumask_clear_cpu(cpu, coupled_mask);
	} else
#endif
	{
		cpumask_clear(coupled_mask);
		online = 1;
	}

	/* Setup the VPE to run mips_cps_pm_restore when started again */
	if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			return -EINVAL;

		core_cfg = &mips_cps_core_bootcfg[core];
		vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(&current_cpu_data)];
		vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
		vpe_cfg->gp = (unsigned long)current_thread_info();
		vpe_cfg->sp = 0;
	}

	/* Indicate that this CPU might not be coherent */
	cpumask_clear_cpu(cpu, &cpu_coherent_mask);
	smp_mb__after_atomic();

	/* Create a non-coherent mapping of the core ready_count */
	core_ready_count = per_cpu(ready_count, core);
	nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
				   (unsigned long)core_ready_count);
	nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
	nc_core_ready_count = nc_addr;

	/* Ensure ready_count is zero-initialised before the assembly runs */
	WRITE_ONCE(*nc_core_ready_count, 0);
	coupled_barrier(&per_cpu(pm_barrier, core), online);

	/* Run the generated entry code */
	left = entry(online, nc_core_ready_count);

	/* Remove the non-coherent mapping of ready_count */
	kunmap_noncoherent();

	/* Indicate that this CPU is definitely coherent */
	cpumask_set_cpu(cpu, &cpu_coherent_mask);

	/*
	 * If this VPE is the first to leave the non-coherent wait state then
	 * it needs to wake up any coupled VPEs still running their wait
	 * instruction so that they return to cpuidle, which can then complete
	 * coordination between the coupled VPEs & provide the governor with
	 * a chance to reflect on the length of time the VPEs were in the
	 * idle state.
	 */
	if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
		arch_send_call_function_ipi_mask(coupled_mask);

	return 0;
}

static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
				  struct uasm_reloc **pr,
				  const struct cache_desc *cache,
				  unsigned op, int lbl)
{
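	/* Total cache size: number of ways << log2(bytes per way) */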
	unsigned cache_size = cache->ways << cache->waybit;
	unsigned i;
	const unsigned unroll_lines = 32;

	/* If the cache isn't present this function has it easy */
	if (cache->flags & MIPS_CACHE_NOT_PRESENT)
		return;

	/* Load base address */
	UASM_i_LA(pp, t0, (long)CKSEG0);

	/* Calculate end address */
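	/* (an addiu immediate is signed 16-bit, hence the 0x8000 threshold) */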
	if (cache_size < 0x8000)
		uasm_i_addiu(pp, t1, t0, cache_size);
	else
		UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));

	/* Start of cache op loop */
	uasm_build_label(pl, *pp, lbl);

	/* Generate the cache ops */
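	/*
	 * MIPSr6 shrank the cache instruction's offset field to 9 signed
	 * bits, too small for the unrolled offsets used below, so on r6 the
	 * base address is advanced after every cache line instead.
	 */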
	for (i = 0; i < unroll_lines; i++) {
		if (cpu_has_mips_r6) {
			uasm_i_cache(pp, op, 0, t0);
			uasm_i_addiu(pp, t0, t0, cache->linesz);
		} else {
			uasm_i_cache(pp, op, i * cache->linesz, t0);
		}
	}

	if (!cpu_has_mips_r6)
		/* Update the base address */
		uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);

	/* Loop if we haven't reached the end address yet */
	uasm_il_bne(pp, pr, t0, t1, lbl);
	uasm_i_nop(pp);
}

static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
			     struct uasm_reloc **pr,
			     const struct cpuinfo_mips *cpu_info,
			     int lbl)
{
	unsigned i, fsb_size = 8;
246 | unsigned line_stride = 2; | |
247 | unsigned line_size = cpu_info->dcache.linesz; | |
248 | unsigned perf_counter, perf_event; | |
249 | unsigned revision = cpu_info->processor_id & PRID_REV_MASK; | |
250 | ||
251 | /* | |
252 | * Determine whether this CPU requires an FSB flush, and if so which | |
253 | * performance counter/event reflect stalls due to a full FSB. | |
254 | */ | |
255 | switch (__get_cpu_type(cpu_info->cputype)) { | |
256 | case CPU_INTERAPTIV: | |
257 | perf_counter = 1; | |
258 | perf_event = 51; | |
259 | break; | |
260 | ||
261 | case CPU_PROAPTIV: | |
262 | /* Newer proAptiv cores don't require this workaround */ | |
263 | if (revision >= PRID_REV_ENCODE_332(1, 1, 0)) | |
264 | return 0; | |
265 | ||
266 | /* On older ones it's unavailable */ | |
267 | return -1; | |
268 | ||
3179d37e | 269 | default: |
b97d0b90 MR |
270 | /* Assume that the CPU does not need this workaround */ |
271 | return 0; | |
3179d37e PB |
272 | } |
273 | ||
274 | /* | |
275 | * Ensure that the fill/store buffer (FSB) is not holding the results | |
276 | * of a prefetch, since if it is then the CPC sequencer may become | |
277 | * stuck in the D3 (ClrBus) state whilst entering a low power state. | |
278 | */ | |
279 | ||
280 | /* Preserve perf counter setup */ | |
281 | uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ | |
282 | uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */ | |
283 | ||
284 | /* Setup perf counter to count FSB full pipeline stalls */ | |
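	/*
	 * Event number in the PerfCtl event field (bits 11:5), with the low
	 * four bits (U/S/K/EXL) set so stalls are counted in all modes.
	 */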
	uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
	uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0);	/* PerfCtlN */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1);	/* PerfCntN */
	uasm_i_ehb(pp);

	/* Base address for loads */
	UASM_i_LA(pp, t0, (long)CKSEG0);

	/* Start of clear loop */
	uasm_build_label(pl, *pp, lbl);

	/* Perform some loads to fill the FSB */
	for (i = 0; i < num_loads; i++)
		uasm_i_lw(pp, zero, i * line_size * line_stride, t0);

	/*
	 * Invalidate the new D-cache entries so that the cache will need
	 * refilling (via the FSB) if the loop is executed again.
	 */
	for (i = 0; i < num_loads; i++) {
		uasm_i_cache(pp, Hit_Invalidate_D,
			     i * line_size * line_stride, t0);
		uasm_i_cache(pp, Hit_Writeback_Inv_SD,
			     i * line_size * line_stride, t0);
	}

	/* Barrier ensuring previous cache invalidates are complete */
	uasm_i_sync(pp, STYPE_SYNC);
	uasm_i_ehb(pp);

	/* Check whether the pipeline stalled due to the FSB being full */
	uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1);	/* PerfCntN */

	/* Loop if it didn't */
	uasm_il_beqz(pp, pr, t1, lbl);
	uasm_i_nop(pp);

	/* Restore perf counter 1. The count may well now be wrong... */
	uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0);	/* PerfCtlN */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1);	/* PerfCntN */
	uasm_i_ehb(pp);

	return 0;
}

static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
				struct uasm_reloc **pr,
				unsigned r_addr, int lbl)
{
	uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
	uasm_build_label(pl, *pp, lbl);
	uasm_i_ll(pp, t1, 0, r_addr);
	uasm_i_or(pp, t1, t1, t0);
	uasm_i_sc(pp, t1, 0, r_addr);
	uasm_il_beqz(pp, pr, t1, lbl);
	uasm_i_nop(pp);
}

static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *buf, *p;
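	/*
	 * Register allocation: a0 & a1 carry the generated function's
	 * arguments (online & nc_ready_count, per cps_nc_entry_fn), whilst
	 * t7 holds the address of the coherence control register throughout.
	 */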
	const unsigned r_online = a0;
	const unsigned r_nc_count = a1;
	const unsigned r_pcohctl = t7;
	const unsigned max_instrs = 256;
	unsigned cpc_cmd;
	int err;
	enum {
		lbl_incready = 1,
		lbl_poll_cont,
		lbl_secondary_hang,
		lbl_disable_coherence,
		lbl_flush_fsb,
		lbl_invicache,
		lbl_flushdcache,
		lbl_hang,
		lbl_set_cont,
		lbl_secondary_cont,
		lbl_decready,
	};

	/* Allocate a buffer to hold the generated code */
	p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
	if (!buf)
		return NULL;

	/* Clear labels & relocs ready for (re)use */
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			goto out_err;

		/*
		 * Save CPU state. Note the non-standard calling convention
		 * with the return address placed in v0 to avoid clobbering
		 * the ra register before it is saved.
		 */
		UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
		uasm_i_jalr(&p, v0, t0);
		uasm_i_nop(&p);
	}

	/*
	 * Load addresses of required CM & CPC registers. This is done early
	 * because they're needed in both the enable & disable coherence steps
	 * but in the coupled case the enable step will only run on one VPE.
	 */
	UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());

	if (coupled_coherence) {
		/* Increment ready_count */
		uasm_i_sync(&p, STYPE_SYNC_MB);
		uasm_build_label(&l, p, lbl_incready);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, 1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
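		/*
		 * Retry if the sc failed; the addiu in the branch delay slot
		 * leaves the incremented count in t1 for the test below.
		 */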
		uasm_il_beqz(&p, &r, t2, lbl_incready);
		uasm_i_addiu(&p, t1, t1, 1);

		/* Barrier ensuring all CPUs see the updated r_nc_count value */
		uasm_i_sync(&p, STYPE_SYNC_MB);

		/*
		 * If this is the last VPE to become ready for non-coherence
		 * then it should branch below.
		 */
		uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
		uasm_i_nop(&p);

		if (state < CPS_PM_POWER_GATED) {
			/*
			 * Otherwise this is not the last VPE to become ready
			 * for non-coherence. It needs to wait until coherence
			 * has been disabled before proceeding, which it will do
			 * by polling for the top bit of ready_count being set.
			 */
			uasm_i_addiu(&p, t1, zero, -1);
			uasm_build_label(&l, p, lbl_poll_cont);
			uasm_i_lw(&p, t0, 0, r_nc_count);
			uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
			uasm_i_ehb(&p);
			if (cpu_has_mipsmt)
				uasm_i_yield(&p, zero, t1);
			uasm_il_b(&p, &r, lbl_poll_cont);
			uasm_i_nop(&p);
		} else {
			/*
			 * The core will lose power & this VPE will not continue
			 * so it can simply halt here.
			 */
			if (cpu_has_mipsmt) {
				/* Halt the VPE via C0 tchalt register */
				uasm_i_addiu(&p, t0, zero, TCHALT_H);
				uasm_i_mtc0(&p, t0, 2, 4);
			} else if (cpu_has_vp) {
				/* Halt the VP via the CPC VP_STOP register */
				unsigned int vpe_id;

				vpe_id = cpu_vpe_id(&cpu_data[cpu]);
				uasm_i_addiu(&p, t0, zero, 1 << vpe_id);
				UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop());
				uasm_i_sw(&p, t0, 0, t1);
			} else {
				BUG();
			}
			uasm_build_label(&l, p, lbl_secondary_hang);
			uasm_il_b(&p, &r, lbl_secondary_hang);
			uasm_i_nop(&p);
		}
	}

	/*
	 * This is the point of no return - this VPE will now proceed to
	 * disable coherence. At this point we *must* be sure that no other
	 * VPE within the core will interfere with the L1 dcache.
	 */
	uasm_build_label(&l, p, lbl_disable_coherence);

	/* Invalidate the L1 icache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
			      Index_Invalidate_I, lbl_invicache);

	/* Writeback & invalidate the L1 dcache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
			      Index_Writeback_Inv_D, lbl_flushdcache);

	/* Barrier ensuring previous cache invalidates are complete */
	uasm_i_sync(&p, STYPE_SYNC);
	uasm_i_ehb(&p);

	if (mips_cm_revision() < CM_REV_CM3) {
		/*
		 * Disable all but self interventions. The load from COHCTL is
		 * defined by the interAptiv & proAptiv SUMs as ensuring that the
		 * operation resulting from the preceding store is complete.
		 */
		uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu]));
		uasm_i_sw(&p, t0, 0, r_pcohctl);
		uasm_i_lw(&p, t0, 0, r_pcohctl);

		/* Barrier to ensure write to coherence control is complete */
		uasm_i_sync(&p, STYPE_SYNC);
		uasm_i_ehb(&p);
	}

	/* Disable coherence */
	uasm_i_sw(&p, zero, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	if (state >= CPS_PM_CLOCK_GATED) {
		err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
					lbl_flush_fsb);
		if (err)
			goto out_err;

		/* Determine the CPC command to issue */
		switch (state) {
		case CPS_PM_CLOCK_GATED:
			cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
			break;
		case CPS_PM_POWER_GATED:
			cpc_cmd = CPC_Cx_CMD_PWRDOWN;
			break;
		default:
			BUG();
			goto out_err;
		}

		/* Issue the CPC command */
		UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
		uasm_i_addiu(&p, t1, zero, cpc_cmd);
		uasm_i_sw(&p, t1, 0, t0);

		if (state == CPS_PM_POWER_GATED) {
			/* If anything goes wrong just hang */
			uasm_build_label(&l, p, lbl_hang);
			uasm_il_b(&p, &r, lbl_hang);
			uasm_i_nop(&p);

			/*
			 * There's no point generating more code, the core is
			 * powered down & if powered back up will run from the
			 * reset vector not from here.
			 */
			goto gen_done;
		}

		/* Barrier to ensure write to CPC command is complete */
		uasm_i_sync(&p, STYPE_SYNC);
		uasm_i_ehb(&p);
	}

	if (state == CPS_PM_NC_WAIT) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		if (coupled_coherence)
			cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
					    lbl_set_cont);

		/*
		 * VPEs which did not disable coherence will continue
		 * executing, after coherence has been disabled, from this
		 * point.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Now perform our wait */
		uasm_i_wait(&p, 0);
	}

	/*
	 * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
	 * will run this. The first will actually re-enable coherence & the
	 * rest will just be performing a rather unusual nop.
	 */
	uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3
				? CM_GCR_Cx_COHERENCE_COHDOMAINEN
				: CM3_GCR_Cx_COHERENCE_COHEN);

	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Barrier to ensure write to coherence control is complete */
	uasm_i_sync(&p, STYPE_SYNC);
	uasm_i_ehb(&p);

	if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
		/* Decrement ready_count */
		uasm_build_label(&l, p, lbl_decready);
		uasm_i_sync(&p, STYPE_SYNC_MB);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, -1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_decready);
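		/*
		 * The andi in the beqz delay slot computes the return value:
		 * the pre-decrement ready_count with the top "coherence
		 * disabled" bit (set by cps_gen_set_top_bit) masked off.
		 */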
		uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);

		/* Barrier ensuring all CPUs see the updated r_nc_count value */
		uasm_i_sync(&p, STYPE_SYNC_MB);
	}

	if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);

		/*
		 * This core will be reliant upon another core sending a
		 * power-up command to the CPC in order to resume operation.
		 * Thus an arbitrary VPE can't trigger the core leaving the
		 * idle state and the one that disables coherence might as well
		 * be the one to re-enable it. The rest will continue from here
		 * after that has been done.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Barrier ensuring all CPUs see the updated r_nc_count value */
		uasm_i_sync(&p, STYPE_SYNC_MB);
	}

	/* The core is coherent, time to return to C code */
	uasm_i_jr(&p, ra);
	uasm_i_nop(&p);

gen_done:
	/* Ensure the code didn't exceed the resources allocated for it */
	BUG_ON((p - buf) > max_instrs);
	BUG_ON((l - labels) > ARRAY_SIZE(labels));
	BUG_ON((r - relocs) > ARRAY_SIZE(relocs));

	/* Patch branch offsets */
	uasm_resolve_relocs(relocs, labels);

	/* Flush the icache */
	local_flush_icache_range((unsigned long)buf, (unsigned long)p);

	return buf;
out_err:
	kfree(buf);
	return NULL;
}

static int cps_pm_online_cpu(unsigned int cpu)
{
	enum cps_pm_state state;
	unsigned core = cpu_core(&cpu_data[cpu]);
	void *entry_fn, *core_rc;

	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
		if (per_cpu(nc_asm_enter, core)[state])
			continue;
		if (!test_bit(state, state_support))
			continue;

		entry_fn = cps_gen_entry_code(cpu, state);
		if (!entry_fn) {
			pr_err("Failed to generate core %u state %u entry\n",
			       core, state);
			clear_bit(state, state_support);
		}

		per_cpu(nc_asm_enter, core)[state] = entry_fn;
	}

	if (!per_cpu(ready_count, core)) {
		core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
		if (!core_rc) {
			pr_err("Failed to allocate core %u ready_count\n",
			       core);
			return -ENOMEM;
		}
		per_cpu(ready_count, core) = core_rc;
	}

	return 0;
}

static int __init cps_pm_init(void)
{
	/* A CM is required for all non-coherent states */
	if (!mips_cm_present()) {
		pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
		return 0;
	}

	/*
	 * If interrupts were enabled whilst running a wait instruction on a
	 * non-coherent core then the VPE may end up processing interrupts
	 * whilst non-coherent. That would be bad.
	 */
	if (cpu_wait == r4k_wait_irqoff)
		set_bit(CPS_PM_NC_WAIT, state_support);
	else
		pr_warn("pm-cps: non-coherent wait unavailable\n");

	/* Detect whether a CPC is present */
	if (mips_cpc_present()) {
		/* Detect whether clock gating is implemented */
		if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL)
			set_bit(CPS_PM_CLOCK_GATED, state_support);
		else
			pr_warn("pm-cps: CPC does not support clock gating\n");

		/* Power gating is available with CPS SMP & any CPC */
		if (mips_cps_smp_in_use())
			set_bit(CPS_PM_POWER_GATED, state_support);
		else
			pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
	} else {
		pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
	}

	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mips/cps_pm:online",
				 cps_pm_online_cpu, NULL);
}
arch_initcall(cps_pm_init);